Setup & Import

library(tidyverse)
## Warning: Paket 'tidyverse' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'tibble' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'tidyr' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'readr' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'purrr' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'dplyr' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'stringr' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'forcats' wurde unter R Version 4.2.3 erstellt
## Warning: Paket 'lubridate' wurde unter R Version 4.2.3 erstellt
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.5
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.5.2     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.1
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(lme4)
## Lade nötiges Paket: Matrix
## 
## Attache Paket: 'Matrix'
## 
## Die folgenden Objekte sind maskiert von 'package:tidyr':
## 
##     expand, pack, unpack
library(lmerTest)
## Warning: Paket 'lmerTest' wurde unter R Version 4.2.3 erstellt
## 
## Attache Paket: 'lmerTest'
## 
## Das folgende Objekt ist maskiert 'package:lme4':
## 
##     lmer
## 
## Das folgende Objekt ist maskiert 'package:stats':
## 
##     step
library(sjPlot)
library(data.table)
## 
## Attache Paket: 'data.table'
## 
## Die folgenden Objekte sind maskiert von 'package:lubridate':
## 
##     hour, isoweek, mday, minute, month, quarter, second, wday, week,
##     yday, year
## 
## Die folgenden Objekte sind maskiert von 'package:dplyr':
## 
##     between, first, last
## 
## Das folgende Objekt ist maskiert 'package:purrr':
## 
##     transpose
library(dplyr)
library(plyr)
## Warning: Paket 'plyr' wurde unter R Version 4.2.3 erstellt
## ------------------------------------------------------------------------------
## You have loaded plyr after dplyr - this is likely to cause problems.
## If you need functions from both plyr and dplyr, please load plyr first, then dplyr:
## library(plyr); library(dplyr)
## ------------------------------------------------------------------------------
## 
## Attache Paket: 'plyr'
## 
## Die folgenden Objekte sind maskiert von 'package:dplyr':
## 
##     arrange, count, desc, failwith, id, mutate, rename, summarise,
##     summarize
## 
## Das folgende Objekt ist maskiert 'package:purrr':
## 
##     compact
library(tidyr)
library(ggplot2)
library(stringr)
library(readr)
library(psych)
## 
## Attache Paket: 'psych'
## 
## Die folgenden Objekte sind maskiert von 'package:ggplot2':
## 
##     %+%, alpha
library(performance)
library(corrr)
## Warning: Paket 'corrr' wurde unter R Version 4.2.3 erstellt
library(purrr)
library(corrplot)
## corrplot 0.95 loaded
library(multilevel)
## Warning: Paket 'multilevel' wurde unter R Version 4.2.3 erstellt
## Lade nötiges Paket: nlme
## 
## Attache Paket: 'nlme'
## 
## Das folgende Objekt ist maskiert 'package:lme4':
## 
##     lmList
## 
## Das folgende Objekt ist maskiert 'package:dplyr':
## 
##     collapse
## 
## Lade nötiges Paket: MASS
## 
## Attache Paket: 'MASS'
## 
## Das folgende Objekt ist maskiert 'package:dplyr':
## 
##     select
library(emmeans)
## Welcome to emmeans.
## Caution: You lose important information if you filter this package's results.
## See '? untidy'
library(tibble)
library(rmcorr)

# Import: cleaned oTree export of the current experiment plus three exports
# from experiment 1 (flow reports, per-round reports, final reports).
# NOTE(review): relative paths — assumes the working directory is the data
# folder; confirm before re-running.
data <- read.csv("otree_cleaned1.csv")
data_old <- read.csv("flow_reports_exp1.csv")
data_old_rounds <- read.csv("round_reports_exp1.csv")
data_old_final <- read.csv("final_reports_exp1.csv")

Data quality check

# Data-quality check for inattentive responding
# ================================================================================
# STRAIGHT-LINING DETECTION - BASE R APPROACH
# ================================================================================

# NOTE(review): print() shows the "\n" literally (see captured output below);
# cat() would be needed for an actual blank line.
print("\n=== STRAIGHT-LINING DETECTION ===")
## [1] "\n=== STRAIGHT-LINING DETECTION ==="
# Find all FSS item columns, e.g. "fss1", "FSS12" (case-insensitive).
# Presumably Flow Short Scale items — confirm against the questionnaire.
fss_columns <- names(data)[grep("fss\\d+", names(data), ignore.case = TRUE)]
print(paste("Arbeite mit", length(fss_columns), "FSS-Spalten"))
## [1] "Arbeite mit 208 FSS-Spalten"
# Funktion zur Berechnung der Antwort-Variabilität
calculate_response_variability <- function(values) {
  # Nur numerische Werte extrahieren
  numeric_values <- as.numeric(values)
  numeric_values <- numeric_values[!is.na(numeric_values)]
  
  if(length(numeric_values) < 2) {
    return(list(
      n_responses = length(numeric_values),
      sd = NA, 
      n_unique = length(numeric_values), 
      straightline = FALSE,
      min_val = ifelse(length(numeric_values) > 0, min(numeric_values), NA),
      max_val = ifelse(length(numeric_values) > 0, max(numeric_values), NA)
    ))
  }
  
  # Statistiken berechnen
  sd_responses <- sd(numeric_values, na.rm = TRUE)
  n_unique <- length(unique(numeric_values))
  min_val <- min(numeric_values)
  max_val <- max(numeric_values)
  
  # Straight-lining: SD = 0 oder nur ein einzigartiger Wert
  straightline <- (sd_responses == 0 | n_unique == 1) & length(numeric_values) > 1
  
  return(list(
    n_responses = length(numeric_values),
    sd = sd_responses, 
    n_unique = n_unique, 
    straightline = straightline,
    min_val = min_val,
    max_val = max_val
  ))
}

# Build the per-participant FSS variability table.
# Rows are collected in a preallocated list and bound once at the end:
# growing a data.frame with rbind() inside the loop copies the whole table
# on every iteration (O(n^2)) and was the previous implementation's cost.
unique_participants <- unique(data$participant.code)
print(paste("Analysiere", length(unique_participants), "Teilnehmer"))
## [1] "Analysiere 120 Teilnehmer"
participant_rows <- vector("list", length(unique_participants))

for (i in seq_along(unique_participants)) {
  participant <- unique_participants[i]

  # All FSS columns for this participant; drop = FALSE keeps a data.frame
  # even if only a single column matched.
  participant_data <- data[data$participant.code == participant, fss_columns, drop = FALSE]

  # Pool every FSS answer of this participant into one vector.
  all_fss_values <- unlist(participant_data)

  variability <- calculate_response_variability(all_fss_values)

  participant_rows[[i]] <- data.frame(
    participant_code = participant,
    n_fss_responses = variability$n_responses,
    fss_sd = variability$sd,
    fss_n_unique = variability$n_unique,
    fss_min = variability$min_val,
    fss_max = variability$max_val,
    fss_straightline = variability$straightline,
    stringsAsFactors = FALSE
  )

  # Progress indicator every 20 participants
  if (i %% 20 == 0) {
    print(paste("Verarbeitet:", i, "von", length(unique_participants)))
  }
}
## [1] "Verarbeitet: 20 von 120"
## [1] "Verarbeitet: 40 von 120"
## [1] "Verarbeitet: 60 von 120"
## [1] "Verarbeitet: 80 von 120"
## [1] "Verarbeitet: 100 von 120"
## [1] "Verarbeitet: 120 von 120"

fss_variability <- do.call(rbind, participant_rows)
# Guard for the empty-input edge case (do.call(rbind, list()) yields NULL):
# fall back to an empty frame with the expected columns.
if (is.null(fss_variability)) {
  fss_variability <- data.frame(
    participant_code = character(),
    n_fss_responses = numeric(),
    fss_sd = numeric(),
    fss_n_unique = numeric(),
    fss_min = numeric(),
    fss_max = numeric(),
    fss_straightline = logical(),
    stringsAsFactors = FALSE
  )
}
# --- Results: straight-lining and low-variability screening ---
print("\n--- ERGEBNISSE ---")
## [1] "\n--- ERGEBNISSE ---"
# Show participants flagged as straight-liners (identical answers on all FSS items)
straightliners <- fss_variability[fss_variability$fss_straightline == TRUE, ]
print(paste("Teilnehmer mit Straight-Lining gefunden:", nrow(straightliners)))
## [1] "Teilnehmer mit Straight-Lining gefunden: 0"
if(nrow(straightliners) > 0) {
  print("\nTeilnehmer mit Straight-Lining bei FSS-Items:")
  print(straightliners)
} else {
  print("Keine Straight-Liner gefunden.")
}
## [1] "Keine Straight-Liner gefunden."
# Show participants with very low variability (potential quality problems):
# SD below 0.5 with more than five recorded FSS responses.
low_variability <- fss_variability[!is.na(fss_variability$fss_sd) & 
                                  fss_variability$fss_sd < 0.5 & 
                                  fss_variability$n_fss_responses > 5, ]

print(paste("\nTeilnehmer mit sehr niedriger Variabilität (SD < 0.5):", nrow(low_variability)))
## [1] "\nTeilnehmer mit sehr niedriger Variabilität (SD < 0.5): 1"
if(nrow(low_variability) > 0) {
  print(low_variability)
}
##     participant_code n_fss_responses    fss_sd fss_n_unique fss_min fss_max
## 106         4god9rit              91 0.2291354            2       5       6
##     fss_straightline
## 106            FALSE
# ================================================================================
# DESCRIPTIVE STATISTICS
# ================================================================================

print("\n--- DESKRIPTIVE STATISTIKEN ---")
## [1] "\n--- DESKRIPTIVE STATISTIKEN ---"
# Overview of response variability: distribution of per-participant SDs
print("Verteilung der Standardabweichungen:")
## [1] "Verteilung der Standardabweichungen:"
print(summary(fss_variability$fss_sd))
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##  0.2291  1.1113  1.3836  1.3651  1.6496  2.2171
# Distribution of the number of distinct answer values per participant
print("\nVerteilung der Anzahl eindeutiger Antworten:")
## [1] "\nVerteilung der Anzahl eindeutiger Antworten:"
print(summary(fss_variability$fss_n_unique))
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##    2.00    6.00    6.00    6.15    7.00    7.00
# Distribution of the number of FSS answers per participant
print("\nVerteilung der Anzahl Antworten pro Person:")
## [1] "\nVerteilung der Anzahl Antworten pro Person:"
print(summary(fss_variability$n_fss_responses))
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
##      91      91     104     104     117     117
# Histogram of per-participant FSS answer SDs; the dashed red line marks the
# SD < 0.5 low-variability screening threshold used above.
# requireNamespace() is the recommended way to test for an optional package;
# require() attaches the package as a side effect and returns FALSE instead
# of erroring, which is discouraged for availability checks. With only the
# namespace loaded, the plotting functions are qualified with ggplot2::.
if (requireNamespace("ggplot2", quietly = TRUE)) {
  p <- ggplot2::ggplot(fss_variability, ggplot2::aes(x = fss_sd)) +
    ggplot2::geom_histogram(bins = 20, fill = "skyblue", alpha = 0.7) +
    ggplot2::geom_vline(xintercept = 0.5, color = "red", linetype = "dashed") +
    ggplot2::labs(title = "Verteilung der FSS-Antwort-Variabilität",
         x = "Standardabweichung der FSS-Antworten",
         y = "Anzahl Teilnehmer") +
    ggplot2::theme_minimal()

  print(p)
}

# ================================================================================
# ADDITIONAL QUALITY CHECKS
# ================================================================================

print("\n--- ZUSÄTZLICHE QUALITÄTSCHECKS ---")
## [1] "\n--- ZUSÄTZLICHE QUALITÄTSCHECKS ---"
# 1. Missing data per FSS item.
# vapply() fixes the result to exactly one integer per column; sapply()'s
# return type depends on its input and can change silently. Names are kept
# from fss_columns (USE.NAMES default).
missing_per_item <- vapply(fss_columns, function(col) {
  sum(is.na(data[[col]]))
}, integer(1))

items_with_missing <- data.frame(
  item = names(missing_per_item),
  missing_count = missing_per_item,
  missing_percent = round(100 * missing_per_item / nrow(data), 2)
)

# Report items with a critical share (> 50%) of missing values
high_missing <- items_with_missing[items_with_missing$missing_percent > 50, ]
if (nrow(high_missing) > 0) {
  print("\nFSS-Items mit >50% fehlenden Daten:")
  print(high_missing)
} else {
  print("\nKeine FSS-Items mit kritisch vielen fehlenden Daten gefunden.")
}
## [1] "\nKeine FSS-Items mit kritisch vielen fehlenden Daten gefunden."
# 2. Extreme values: participants with near-zero answer variability
# (SD < 0.1 over more than 10 responses)
extreme_low <- fss_variability[!is.na(fss_variability$fss_sd) & 
                              fss_variability$fss_sd < 0.1 & 
                              fss_variability$n_fss_responses > 10, ]

if(nrow(extreme_low) > 0) {
  print(paste("\nTeilnehmer mit extrem niedriger Variabilität (SD < 0.1):", nrow(extreme_low)))
  print(extreme_low)
}

# 3. Keep the results under a stable name for later use
fss_quality_results <- fss_variability
print(paste("\nErgebnisse in 'fss_quality_results' gespeichert (", nrow(fss_quality_results), " Teilnehmer)"))
## [1] "\nErgebnisse in 'fss_quality_results' gespeichert ( 120  Teilnehmer)"
# 4. Exclusion recommendations: straight-liners, or extremely low
# variability combined with a reasonable number of responses
exclusion_candidates <- fss_variability[
  (fss_variability$fss_straightline == TRUE) |
  (!is.na(fss_variability$fss_sd) & fss_variability$fss_sd < 0.1 & fss_variability$n_fss_responses > 10),
]

print(paste("\nEmpfohlene Ausschluss-Kandidaten:", nrow(exclusion_candidates)))
## [1] "\nEmpfohlene Ausschluss-Kandidaten: 0"
if(nrow(exclusion_candidates) > 0) {
  print("Teilnehmer zur Überprüfung:")
  print(exclusion_candidates[, c("participant_code", "fss_sd", "fss_n_unique", "fss_straightline")])
}

print("\n=== STRAIGHT-LINING DETECTION ABGESCHLOSSEN ===")
## [1] "\n=== STRAIGHT-LINING DETECTION ABGESCHLOSSEN ==="

Descriptive statistics

# Demographic overview of the sample (percent female/male, mean/SD age,
# percent right-handed).
# NOTE: plyr is attached after dplyr at the top of this file (the load-time
# warning flags exactly this), so a bare `summarise` dispatches to
# plyr::summarise. Qualifying it explicitly — consistent with the
# dplyr::select() calls used elsewhere in this file — removes the masking
# hazard; for this ungrouped summary the result is unchanged.
demographics <- data %>%
  dplyr::summarise(
    # Gender
    female_pct = mean(Intro.1.player.gender == "Female", na.rm = TRUE) * 100,
    male_pct = mean(Intro.1.player.gender == "Male", na.rm = TRUE) * 100,
    # Age
    age_mean = mean(Intro.1.player.age, na.rm = TRUE),
    age_sd = sd(Intro.1.player.age, na.rm = TRUE),
    # Handedness
    right_handed_pct = mean(Intro.1.player.dominant_hand == "Right", na.rm = TRUE) * 100
  )

print(round(demographics, 1))
##   female_pct male_pct age_mean age_sd right_handed_pct
## 1       31.7     68.3     24.6    5.4             94.2
# English proficiency in detail (CEFR self-report, as percentages)
cat("\nEnglischkenntnisse:\n")
## 
## Englischkenntnisse:
english_table <- prop.table(table(data$Intro.1.player.english)) * 100
print(round(english_table, 1))
## 
##               A2 Elementary English – I can understand sentences and frequently used expressions related to areas of most immediate relevance. 
##                                                                                                                                            0.8 
##          B1 Intermediate English – I understand the main points of clear input on familiar matters regularly encountered in work, school, etc. 
##                                                                                                                                            2.5 
## B2 Upper-Intermediate English – I understand the main ideas of complex text on concrete and abstract topics, incl. technical discussions, etc. 
##                                                                                                                                           14.2 
##                                C1 Advanced English – I can understand a wide range of demanding, longer texts, and recognise implicit meaning. 
##                                                                                                                                           43.3 
##                                                        C2 Proficient English – I can understand with ease virtually everything I hear or read. 
##                                                                                                                                           39.2
# Occupation and field of study - top 5 categories only
cat("\nTop 5 Occupations:\n")
## 
## Top 5 Occupations:
head(sort(table(data$Intro.1.player.occupation), decreasing = TRUE), 5)
## 
##    Student   Employee      Other Unemployed 
##        113          4          2          1
cat("\nTop 5 Fields of Study:\n")
## 
## Top 5 Fields of Study:
head(sort(table(data$Intro.1.player.field_of_study), decreasing = TRUE), 5)
## 
## Engineering and Technology (e.g. Civil Engineering, Mechanical Engineering) 
##                                                                          71 
##                       Natural Sciences (e.g. Mathematics, Computer Science) 
##                                                                          25 
##                      Social Sciences (e.g. Economics, Educational Sciences) 
##                                                                          15 
##                                                               Not Specified 
##                                                                           5 
##                                        Humanities (e.g. History, Languages) 
##                                                                           2

Manipulation checks for common team goal, team member interdependence, and means for coordinating work

library(dplyr)
library(psych)

# mathChat - Interdependence: internal consistency (Cronbach's alpha) of the
# three interdependence items; check.keys = TRUE reverse-scores items that
# correlate negatively with the first principal component.
int_mathChat <- data[, c("mathChat.6.player.int1", "mathChat.6.player.int2",
                         "mathChat.6.player.int3")]
psych::alpha(int_mathChat, check.keys=TRUE)
## Warning in psych::alpha(int_mathChat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = int_mathChat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.63      0.63    0.54      0.37 1.7 0.058    4 1.3     0.37
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.50  0.63  0.73
## Duhachek  0.52  0.63  0.75
## 
##  Reliability if an item is dropped:
##                         raw_alpha std.alpha G6(smc) average_r S/N alpha se
## mathChat.6.player.int1-      0.54      0.54    0.37      0.37 1.2    0.083
## mathChat.6.player.int2       0.55      0.55    0.38      0.38 1.2    0.081
## mathChat.6.player.int3       0.51      0.51    0.34      0.34 1.0    0.090
##                         var.r med.r
## mathChat.6.player.int1-    NA  0.37
## mathChat.6.player.int2     NA  0.38
## mathChat.6.player.int3     NA  0.34
## 
##  Item statistics 
##                          n raw.r std.r r.cor r.drop mean  sd
## mathChat.6.player.int1- 60  0.78  0.76  0.55   0.44  3.6 1.8
## mathChat.6.player.int2  60  0.75  0.75  0.54   0.43  3.5 1.7
## mathChat.6.player.int3  60  0.76  0.77  0.58   0.46  5.0 1.6
## 
## Non missing response frequency for each item
##                           1    2    3    4    5    6    7 miss
## mathChat.6.player.int1 0.08 0.08 0.17 0.13 0.18 0.22 0.13  0.5
## mathChat.6.player.int2 0.08 0.28 0.18 0.15 0.13 0.13 0.03  0.5
## mathChat.6.player.int3 0.03 0.08 0.08 0.08 0.25 0.32 0.15  0.5
# mathChat - Common Goal: Cronbach's alpha for the six common-goal items,
# with automatic reverse-keying (check.keys = TRUE).
cg_mathChat <- data[, c("mathChat.6.player.cg1", "mathChat.6.player.cg2",
                        "mathChat.6.player.cg3", "mathChat.6.player.cg4",
                        "mathChat.6.player.cg5", "mathChat.6.player.cg6")]
psych::alpha(cg_mathChat, check.keys=TRUE)
## Warning in psych::alpha(cg_mathChat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = cg_mathChat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r  S/N  ase mean   sd median_r
##       0.37      0.37     0.4      0.09 0.59 0.09  4.8 0.76    0.071
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.17  0.37  0.53
## Duhachek  0.19  0.37  0.54
## 
##  Reliability if an item is dropped:
##                        raw_alpha std.alpha G6(smc) average_r  S/N alpha se
## mathChat.6.player.cg1-      0.36      0.35    0.36     0.098 0.55    0.091
## mathChat.6.player.cg2       0.33      0.34    0.32     0.092 0.51    0.097
## mathChat.6.player.cg3-      0.36      0.36    0.35     0.103 0.57    0.093
## mathChat.6.player.cg4       0.30      0.30    0.32     0.078 0.42    0.101
## mathChat.6.player.cg5-      0.29      0.29    0.34     0.077 0.42    0.104
## mathChat.6.player.cg6       0.33      0.34    0.37     0.092 0.51    0.098
##                        var.r med.r
## mathChat.6.player.cg1- 0.018 0.120
## mathChat.6.player.cg2  0.012 0.052
## mathChat.6.player.cg3- 0.013 0.081
## mathChat.6.player.cg4  0.020 0.066
## mathChat.6.player.cg5- 0.023 0.052
## mathChat.6.player.cg6  0.025 0.086
## 
##  Item statistics 
##                         n raw.r std.r r.cor r.drop mean  sd
## mathChat.6.player.cg1- 60  0.47  0.46  0.27   0.13  5.4 1.7
## mathChat.6.player.cg2  60  0.45  0.48  0.34   0.17  5.8 1.4
## mathChat.6.player.cg3- 60  0.46  0.45  0.26   0.12  4.2 1.6
## mathChat.6.player.cg4  60  0.49  0.53  0.38   0.22  4.6 1.3
## mathChat.6.player.cg5- 60  0.57  0.54  0.36   0.22  3.8 1.8
## mathChat.6.player.cg6  60  0.50  0.49  0.26   0.17  4.7 1.6
## 
## Non missing response frequency for each item
##                          1    2    3    4    5    6    7 miss
## mathChat.6.player.cg1 0.32 0.32 0.08 0.10 0.12 0.05 0.02  0.5
## mathChat.6.player.cg2 0.02 0.03 0.02 0.05 0.15 0.37 0.37  0.5
## mathChat.6.player.cg3 0.07 0.17 0.22 0.15 0.23 0.15 0.02  0.5
## mathChat.6.player.cg4 0.02 0.05 0.13 0.22 0.32 0.23 0.03  0.5
## mathChat.6.player.cg5 0.07 0.13 0.18 0.18 0.18 0.13 0.12  0.5
## mathChat.6.player.cg6 0.07 0.05 0.10 0.17 0.25 0.28 0.08  0.5
# mathChat - Means for Coordination: two-item reliability, with automatic
# reverse-keying (check.keys = TRUE).
mc_mathChat <- data[, c("mathChat.6.player.mc1", "mathChat.6.player.mc2")]
psych::alpha(mc_mathChat, check.keys=TRUE)
## Warning in psych::alpha(mc_mathChat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = mc_mathChat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N  ase mean  sd median_r
##       0.67      0.67    0.51      0.51   2 0.06    4 1.7     0.51
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.53  0.67  0.77
## Duhachek  0.55  0.67  0.79
## 
##  Reliability if an item is dropped:
##                        raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r
## mathChat.6.player.mc1-      0.54      0.51    0.26      0.51   1       NA     0
## mathChat.6.player.mc2       0.48      0.51    0.26      0.51   1       NA     0
##                        med.r
## mathChat.6.player.mc1-  0.51
## mathChat.6.player.mc2   0.51
## 
##  Item statistics 
##                         n raw.r std.r r.cor r.drop mean  sd
## mathChat.6.player.mc1- 60  0.88  0.87  0.62   0.51  3.5 2.0
## mathChat.6.player.mc2  60  0.86  0.87  0.62   0.51  4.5 1.9
## 
## Non missing response frequency for each item
##                          1    2    3    4    5    6    7 miss
## mathChat.6.player.mc1 0.07 0.15 0.18 0.02 0.13 0.30 0.15  0.5
## mathChat.6.player.mc2 0.07 0.15 0.10 0.07 0.27 0.22 0.13  0.5
# mathJitsi - Interdependence: Cronbach's alpha for the three
# interdependence items, with automatic reverse-keying (check.keys = TRUE).
int_mathJitsi <- data[, c("mathJitsi.6.player.int1", "mathJitsi.6.player.int2",
                          "mathJitsi.6.player.int3")]
psych::alpha(int_mathJitsi, check.keys=TRUE)
## Warning in psych::alpha(int_mathJitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = int_mathJitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r  S/N   ase mean  sd median_r
##       0.42      0.42    0.34       0.2 0.73 0.091  4.6 1.3     0.23
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.22  0.42  0.58
## Duhachek  0.25  0.42  0.60
## 
##  Reliability if an item is dropped:
##                          raw_alpha std.alpha G6(smc) average_r  S/N alpha se
## mathJitsi.6.player.int1-      0.37      0.37   0.231     0.231 0.60     0.11
## mathJitsi.6.player.int2       0.14      0.14   0.077     0.077 0.17     0.16
## mathJitsi.6.player.int3       0.44      0.44   0.279     0.279 0.77     0.10
##                          var.r med.r
## mathJitsi.6.player.int1-    NA 0.231
## mathJitsi.6.player.int2     NA 0.077
## mathJitsi.6.player.int3     NA 0.279
## 
##  Item statistics 
##                           n raw.r std.r r.cor r.drop mean  sd
## mathJitsi.6.player.int1- 60  0.68  0.66  0.36   0.23  4.1 2.0
## mathJitsi.6.player.int2  60  0.75  0.74  0.53   0.35  4.6 1.9
## mathJitsi.6.player.int3  60  0.61  0.64  0.30   0.19  5.2 1.8
## 
## Non missing response frequency for each item
##                            1    2    3    4    5    6    7 miss
## mathJitsi.6.player.int1 0.13 0.17 0.18 0.05 0.22 0.13 0.12  0.5
## mathJitsi.6.player.int2 0.03 0.17 0.17 0.05 0.17 0.20 0.22  0.5
## mathJitsi.6.player.int3 0.02 0.13 0.05 0.08 0.13 0.32 0.27  0.5
# mathJitsi - Common Goal: Cronbach's alpha for the six common-goal items,
# with automatic reverse-keying (check.keys = TRUE).
cg_mathJitsi <- data[, c("mathJitsi.6.player.cg1", "mathJitsi.6.player.cg2",
                         "mathJitsi.6.player.cg3", "mathJitsi.6.player.cg4",
                         "mathJitsi.6.player.cg5", "mathJitsi.6.player.cg6")]
psych::alpha(cg_mathJitsi, check.keys=TRUE)
## Warning in psych::alpha(cg_mathJitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = cg_mathJitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r  S/N   ase mean   sd median_r
##       0.41      0.43    0.51      0.11 0.75 0.085  4.9 0.81     0.08
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.23  0.41  0.56
## Duhachek  0.24  0.41  0.58
## 
##  Reliability if an item is dropped:
##                         raw_alpha std.alpha G6(smc) average_r  S/N alpha se
## mathJitsi.6.player.cg1-      0.31      0.34    0.40     0.094 0.52    0.100
## mathJitsi.6.player.cg2       0.43      0.44    0.48     0.134 0.78    0.084
## mathJitsi.6.player.cg3-      0.39      0.41    0.41     0.123 0.70    0.089
## mathJitsi.6.player.cg4       0.28      0.30    0.35     0.078 0.42    0.104
## mathJitsi.6.player.cg5-      0.44      0.45    0.53     0.139 0.81    0.083
## mathJitsi.6.player.cg6       0.33      0.35    0.40     0.096 0.53    0.097
##                         var.r med.r
## mathJitsi.6.player.cg1- 0.034 0.093
## mathJitsi.6.player.cg2  0.025 0.094
## mathJitsi.6.player.cg3- 0.021 0.078
## mathJitsi.6.player.cg4  0.027 0.094
## mathJitsi.6.player.cg5- 0.036 0.137
## mathJitsi.6.player.cg6  0.026 0.074
## 
##  Item statistics 
##                          n raw.r std.r r.cor r.drop mean  sd
## mathJitsi.6.player.cg1- 60  0.56  0.56  0.44  0.280  5.7 1.6
## mathJitsi.6.player.cg2  60  0.41  0.43  0.23  0.093  5.7 1.6
## mathJitsi.6.player.cg3- 60  0.49  0.47  0.35  0.157  4.1 1.7
## mathJitsi.6.player.cg4  60  0.59  0.62  0.55  0.342  4.3 1.5
## mathJitsi.6.player.cg5- 60  0.46  0.41  0.15  0.096  4.2 1.8
## mathJitsi.6.player.cg6  60  0.53  0.56  0.45  0.252  5.1 1.5
## 
## Non missing response frequency for each item
##                           1    2    3    4    5    6    7 miss
## mathJitsi.6.player.cg1 0.45 0.22 0.12 0.08 0.08 0.05 0.00  0.5
## mathJitsi.6.player.cg2 0.03 0.05 0.02 0.05 0.17 0.32 0.37  0.5
## mathJitsi.6.player.cg3 0.07 0.20 0.18 0.17 0.13 0.20 0.05  0.5
## mathJitsi.6.player.cg4 0.03 0.10 0.13 0.27 0.27 0.15 0.05  0.5
## mathJitsi.6.player.cg5 0.15 0.12 0.15 0.30 0.07 0.12 0.10  0.5
## mathJitsi.6.player.cg6 0.02 0.03 0.07 0.27 0.17 0.22 0.23  0.5
# mathJitsi - Means for Coordination: two-item reliability, with automatic
# reverse-keying (check.keys = TRUE).
mc_mathJitsi <- data[, c("mathJitsi.6.player.mc1", "mathJitsi.6.player.mc2")]
psych::alpha(mc_mathJitsi, check.keys=TRUE)
## Warning in psych::alpha(mc_mathJitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = mc_mathJitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.57      0.58    0.41      0.41 1.4 0.076  5.1 1.4     0.41
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.38  0.57  0.70
## Duhachek  0.42  0.57  0.72
## 
##  Reliability if an item is dropped:
##                         raw_alpha std.alpha G6(smc) average_r  S/N alpha se
## mathJitsi.6.player.mc1-      0.53      0.41    0.17      0.41 0.69       NA
## mathJitsi.6.player.mc2       0.31      0.41    0.17      0.41 0.69       NA
##                         var.r med.r
## mathJitsi.6.player.mc1-     0  0.41
## mathJitsi.6.player.mc2      0  0.41
## 
##  Item statistics 
##                          n raw.r std.r r.cor r.drop mean  sd
## mathJitsi.6.player.mc1- 60  0.88  0.84  0.54   0.41  4.7 1.8
## mathJitsi.6.player.mc2  60  0.79  0.84  0.54   0.41  5.4 1.4
## 
## Non missing response frequency for each item
##                          1    2    3    4    5    6    7 miss
## mathJitsi.6.player.mc1 0.2 0.23 0.13 0.12 0.13 0.17 0.02  0.5
## mathJitsi.6.player.mc2 0.0 0.02 0.12 0.13 0.17 0.28 0.28  0.5
# HiddenProfile_Chat - Interdependence: Cronbach's alpha for the three
# interdependence items, with automatic reverse-keying (check.keys = TRUE).
int_HiddenProfile_Chat <- data[, c("HiddenProfile_Chat.3.player.int1",
                                   "HiddenProfile_Chat.3.player.int2",
                                   "HiddenProfile_Chat.3.player.int3")]
psych::alpha(int_HiddenProfile_Chat, check.keys=TRUE)
## Warning in psych::alpha(int_HiddenProfile_Chat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = int_HiddenProfile_Chat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r  S/N ase mean sd median_r
##       0.37       0.4    0.33      0.18 0.65 0.1  5.3  1    0.095
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.15  0.37  0.55
## Duhachek  0.18  0.37  0.57
## 
##  Reliability if an item is dropped:
##                                   raw_alpha std.alpha G6(smc) average_r  S/N
## HiddenProfile_Chat.3.player.int1-      0.52      0.53   0.357     0.357 1.11
## HiddenProfile_Chat.3.player.int2       0.17      0.17   0.095     0.095 0.21
## HiddenProfile_Chat.3.player.int3       0.15      0.16   0.086     0.086 0.19
##                                   alpha se var.r med.r
## HiddenProfile_Chat.3.player.int1-    0.086    NA 0.357
## HiddenProfile_Chat.3.player.int2     0.151    NA 0.095
## HiddenProfile_Chat.3.player.int3     0.149    NA 0.086
## 
##  Item statistics 
##                                    n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Chat.3.player.int1- 60  0.64  0.59  0.17   0.11  5.0 1.7
## HiddenProfile_Chat.3.player.int2  60  0.65  0.71  0.49   0.29  5.6 1.3
## HiddenProfile_Chat.3.player.int3  60  0.72  0.72  0.50   0.28  5.5 1.6
## 
## Non missing response frequency for each item
##                                     1    2    3    4    5    6    7 miss
## HiddenProfile_Chat.3.player.int1 0.18 0.28 0.27 0.03 0.12 0.07 0.05  0.5
## HiddenProfile_Chat.3.player.int2 0.00 0.03 0.05 0.12 0.17 0.37 0.27  0.5
## HiddenProfile_Chat.3.player.int3 0.02 0.07 0.07 0.07 0.17 0.30 0.32  0.5
# HiddenProfile_Chat - Common Goal: Cronbach's alpha for the six common-goal
# items, with automatic reverse-keying (check.keys = TRUE).
cg_HiddenProfile_Chat <- data[, c("HiddenProfile_Chat.3.player.cg1",
                                  "HiddenProfile_Chat.3.player.cg2",
                                  "HiddenProfile_Chat.3.player.cg3",
                                  "HiddenProfile_Chat.3.player.cg4",
                                  "HiddenProfile_Chat.3.player.cg5",
                                  "HiddenProfile_Chat.3.player.cg6")]
psych::alpha(cg_HiddenProfile_Chat, check.keys=TRUE)
## Warning in psych::alpha(cg_HiddenProfile_Chat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = cg_HiddenProfile_Chat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##       0.64      0.65    0.71      0.23 1.8 0.052  4.9 0.87     0.18
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.53  0.64  0.73
## Duhachek  0.53  0.64  0.74
## 
##  Reliability if an item is dropped:
##                                  raw_alpha std.alpha G6(smc) average_r S/N
## HiddenProfile_Chat.3.player.cg1-      0.57      0.58    0.63      0.22 1.4
## HiddenProfile_Chat.3.player.cg2       0.56      0.57    0.61      0.21 1.3
## HiddenProfile_Chat.3.player.cg3-      0.55      0.56    0.58      0.21 1.3
## HiddenProfile_Chat.3.player.cg4       0.59      0.60    0.63      0.23 1.5
## HiddenProfile_Chat.3.player.cg5-      0.67      0.67    0.71      0.29 2.1
## HiddenProfile_Chat.3.player.cg6       0.60      0.61    0.65      0.24 1.6
##                                  alpha se var.r med.r
## HiddenProfile_Chat.3.player.cg1-    0.064 0.031  0.22
## HiddenProfile_Chat.3.player.cg2     0.064 0.030  0.18
## HiddenProfile_Chat.3.player.cg3-    0.066 0.028  0.18
## HiddenProfile_Chat.3.player.cg4     0.060 0.033  0.18
## HiddenProfile_Chat.3.player.cg5-    0.048 0.030  0.27
## HiddenProfile_Chat.3.player.cg6     0.057 0.034  0.22
## 
##  Item statistics 
##                                   n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Chat.3.player.cg1- 60  0.65  0.64  0.56   0.43  5.1 1.5
## HiddenProfile_Chat.3.player.cg2  60  0.65  0.67  0.60   0.46  5.5 1.3
## HiddenProfile_Chat.3.player.cg3- 60  0.69  0.68  0.63   0.48  4.7 1.5
## HiddenProfile_Chat.3.player.cg4  60  0.57  0.60  0.51   0.37  4.7 1.3
## HiddenProfile_Chat.3.player.cg5- 60  0.48  0.43  0.24   0.18  4.3 1.7
## HiddenProfile_Chat.3.player.cg6  60  0.56  0.58  0.48   0.34  5.1 1.4
## 
## Non missing response frequency for each item
##                                    1    2    3    4    5    6    7 miss
## HiddenProfile_Chat.3.player.cg1 0.13 0.38 0.18 0.13 0.07 0.08 0.02  0.5
## HiddenProfile_Chat.3.player.cg2 0.00 0.07 0.02 0.08 0.22 0.38 0.23  0.5
## HiddenProfile_Chat.3.player.cg3 0.03 0.37 0.23 0.12 0.10 0.15 0.00  0.5
## HiddenProfile_Chat.3.player.cg4 0.00 0.05 0.12 0.28 0.25 0.22 0.08  0.5
## HiddenProfile_Chat.3.player.cg5 0.05 0.27 0.18 0.22 0.08 0.15 0.05  0.5
## HiddenProfile_Chat.3.player.cg6 0.00 0.07 0.07 0.15 0.25 0.33 0.13  0.5
# HiddenProfile_Chat - Means for Coordination: reliability of the two mc items
mc_HiddenProfile_Chat <- data %>%
  dplyr::select(dplyr::all_of(paste0("HiddenProfile_Chat.3.player.mc", 1:2)))
psych::alpha(mc_HiddenProfile_Chat, check.keys = TRUE)
## Warning in psych::alpha(mc_HiddenProfile_Chat, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = mc_HiddenProfile_Chat, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.57      0.58    0.41      0.41 1.4 0.076  4.7 1.4     0.41
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.39  0.57  0.70
## Duhachek  0.43  0.57  0.72
## 
##  Reliability if an item is dropped:
##                                  raw_alpha std.alpha G6(smc) average_r S/N
## HiddenProfile_Chat.3.player.mc1-      0.51      0.41    0.17      0.41 0.7
## HiddenProfile_Chat.3.player.mc2       0.33      0.41    0.17      0.41 0.7
##                                  alpha se var.r med.r
## HiddenProfile_Chat.3.player.mc1-       NA     0  0.41
## HiddenProfile_Chat.3.player.mc2        NA     0  0.41
## 
##  Item statistics 
##                                   n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Chat.3.player.mc1- 60  0.88  0.84  0.54   0.41  4.3 1.9
## HiddenProfile_Chat.3.player.mc2  60  0.80  0.84  0.54   0.41  5.1 1.5
## 
## Non missing response frequency for each item
##                                    1    2    3    4    5    6    7 miss
## HiddenProfile_Chat.3.player.mc1 0.08 0.32 0.15 0.05 0.17 0.18 0.05  0.5
## HiddenProfile_Chat.3.player.mc2 0.02 0.07 0.08 0.13 0.18 0.40 0.12  0.5
# HiddenProfile_Jitsi - Interdependence: reliability of the three int items
int_HiddenProfile_Jitsi <- data %>%
  dplyr::select(dplyr::all_of(paste0("HiddenProfile_Jitsi.3.player.int", 1:3)))
psych::alpha(int_HiddenProfile_Jitsi, check.keys = TRUE)
## Warning in psych::alpha(int_HiddenProfile_Jitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = int_HiddenProfile_Jitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r  S/N   ase mean sd median_r
##       0.39      0.41    0.36      0.19 0.69 0.095  5.7  1     0.16
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.17  0.39  0.56
## Duhachek  0.20  0.39  0.57
## 
##  Reliability if an item is dropped:
##                                    raw_alpha std.alpha G6(smc) average_r   S/N
## HiddenProfile_Jitsi.3.player.int1-      0.54     0.569  0.3980    0.3980 1.322
## HiddenProfile_Jitsi.3.player.int2       0.27     0.269  0.1556    0.1556 0.368
## HiddenProfile_Jitsi.3.player.int3       0.01     0.011  0.0054    0.0054 0.011
##                                    alpha se var.r  med.r
## HiddenProfile_Jitsi.3.player.int1-    0.077    NA 0.3980
## HiddenProfile_Jitsi.3.player.int2     0.133    NA 0.1556
## HiddenProfile_Jitsi.3.player.int3     0.168    NA 0.0054
## 
##  Item statistics 
##                                     n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Jitsi.3.player.int1- 60  0.64  0.57  0.15   0.11  5.4 1.7
## HiddenProfile_Jitsi.3.player.int2  60  0.59  0.69  0.46   0.27  6.2 1.1
## HiddenProfile_Jitsi.3.player.int3  60  0.78  0.77  0.60   0.35  5.4 1.7
## 
## Non missing response frequency for each item
##                                      1    2    3    4    5    6    7 miss
## HiddenProfile_Jitsi.3.player.int1 0.28 0.32 0.20 0.07 0.03 0.05 0.05  0.5
## HiddenProfile_Jitsi.3.player.int2 0.00 0.00 0.03 0.08 0.10 0.23 0.55  0.5
## HiddenProfile_Jitsi.3.player.int3 0.05 0.02 0.08 0.07 0.20 0.25 0.33  0.5
# HiddenProfile_Jitsi - Common Goal: internal consistency of the six cg items
cg_HiddenProfile_Jitsi <- data %>%
  dplyr::select(dplyr::all_of(paste0("HiddenProfile_Jitsi.3.player.cg", 1:6)))
psych::alpha(cg_HiddenProfile_Jitsi, check.keys = TRUE)
## Warning in psych::alpha(cg_HiddenProfile_Jitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = cg_HiddenProfile_Jitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##        0.6      0.61    0.67       0.2 1.5 0.057  5.1 0.9     0.22
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.48   0.6  0.70
## Duhachek  0.49   0.6  0.71
## 
##  Reliability if an item is dropped:
##                                   raw_alpha std.alpha G6(smc) average_r S/N
## HiddenProfile_Jitsi.3.player.cg1-      0.51      0.51    0.55      0.17 1.1
## HiddenProfile_Jitsi.3.player.cg2       0.54      0.54    0.60      0.19 1.2
## HiddenProfile_Jitsi.3.player.cg3-      0.56      0.57    0.57      0.21 1.3
## HiddenProfile_Jitsi.3.player.cg4       0.55      0.56    0.63      0.20 1.3
## HiddenProfile_Jitsi.3.player.cg5-      0.60      0.60    0.64      0.23 1.5
## HiddenProfile_Jitsi.3.player.cg6       0.57      0.58    0.60      0.21 1.4
##                                   alpha se var.r med.r
## HiddenProfile_Jitsi.3.player.cg1-    0.071 0.027  0.18
## HiddenProfile_Jitsi.3.player.cg2     0.067 0.029  0.23
## HiddenProfile_Jitsi.3.player.cg3-    0.064 0.029  0.18
## HiddenProfile_Jitsi.3.player.cg4     0.066 0.045  0.26
## HiddenProfile_Jitsi.3.player.cg5-    0.058 0.040  0.22
## HiddenProfile_Jitsi.3.player.cg6     0.062 0.031  0.17
## 
##  Item statistics 
##                                    n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Jitsi.3.player.cg1- 60  0.66  0.67  0.62   0.44  5.4 1.5
## HiddenProfile_Jitsi.3.player.cg2  60  0.61  0.62  0.52   0.38  5.3 1.5
## HiddenProfile_Jitsi.3.player.cg3- 60  0.59  0.56  0.48   0.33  5.0 1.7
## HiddenProfile_Jitsi.3.player.cg4  60  0.59  0.58  0.43   0.35  4.8 1.6
## HiddenProfile_Jitsi.3.player.cg5- 60  0.52  0.50  0.34   0.24  4.8 1.7
## HiddenProfile_Jitsi.3.player.cg6  60  0.52  0.55  0.45   0.30  5.4 1.4
## 
## Non missing response frequency for each item
##                                     1    2    3    4    5    6    7 miss
## HiddenProfile_Jitsi.3.player.cg1 0.28 0.28 0.13 0.18 0.07 0.03 0.02  0.5
## HiddenProfile_Jitsi.3.player.cg2 0.00 0.05 0.12 0.13 0.12 0.33 0.25  0.5
## HiddenProfile_Jitsi.3.player.cg3 0.18 0.30 0.15 0.15 0.13 0.05 0.03  0.5
## HiddenProfile_Jitsi.3.player.cg4 0.02 0.08 0.13 0.08 0.32 0.23 0.13  0.5
## HiddenProfile_Jitsi.3.player.cg5 0.18 0.22 0.13 0.30 0.05 0.08 0.03  0.5
## HiddenProfile_Jitsi.3.player.cg6 0.02 0.02 0.02 0.27 0.13 0.32 0.23  0.5
# HiddenProfile_Jitsi - Means for Coordination: reliability of the two mc items
mc_HiddenProfile_Jitsi <- data %>%
  dplyr::select(dplyr::all_of(paste0("HiddenProfile_Jitsi.3.player.mc", 1:2)))
psych::alpha(mc_HiddenProfile_Jitsi, check.keys = TRUE)
## Warning in psych::alpha(mc_HiddenProfile_Jitsi, check.keys = TRUE): Some items were negatively correlated with the first principal component and were automatically reversed.
##  This is indicated by a negative sign for the variable name.
## 
## Reliability analysis   
## Call: psych::alpha(x = mc_HiddenProfile_Jitsi, check.keys = TRUE)
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean  sd median_r
##       0.71      0.75     0.6       0.6   3 0.047  5.4 1.4      0.6
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.58  0.71   0.8
## Duhachek  0.62  0.71   0.8
## 
##  Reliability if an item is dropped:
##                                   raw_alpha std.alpha G6(smc) average_r S/N
## HiddenProfile_Jitsi.3.player.mc1-      0.91       0.6    0.36       0.6 1.5
## HiddenProfile_Jitsi.3.player.mc2       0.39       0.6    0.36       0.6 1.5
##                                   alpha se var.r med.r
## HiddenProfile_Jitsi.3.player.mc1-       NA     0   0.6
## HiddenProfile_Jitsi.3.player.mc2        NA     0   0.6
## 
##  Item statistics 
##                                    n raw.r std.r r.cor r.drop mean  sd
## HiddenProfile_Jitsi.3.player.mc1- 60  0.94  0.89  0.69    0.6  5.0 1.8
## HiddenProfile_Jitsi.3.player.mc2  60  0.84  0.89  0.69    0.6  5.8 1.2
## 
## Non missing response frequency for each item
##                                    1    2    3    4    5   6    7 miss
## HiddenProfile_Jitsi.3.player.mc1 0.2 0.33 0.17 0.05 0.10 0.1 0.05  0.5
## HiddenProfile_Jitsi.3.player.mc2 0.0 0.00 0.07 0.08 0.22 0.3 0.33  0.5
# Reverse-code items (only those flagged with a negative sign in the alpha
# output above); creates <item>_rev columns with 8 - x on the 1-7 scale
data <- data %>%
  mutate(across(
    c(mathChat.6.player.int1, mathChat.6.player.cg1, mathChat.6.player.cg3,
      mathChat.6.player.cg5, mathChat.6.player.mc1),
    ~ 8 - .x,
    .names = "{.col}_rev"
  ))

# Scale means per construct and treatment (row-wise item means, 1-7 scale)
# NOTE(review): only the mathChat items were reverse-coded above, yet the
# alpha output for the HiddenProfile scales also flagged int1/cg1/cg3/cg5/mc1
# as negatively keyed - confirm whether the mathJitsi and HiddenProfile scale
# means should use reversed items as well before averaging.
data <- data %>%
  mutate(
    mc_mathChat = rowMeans(dplyr::select(., mathChat.6.player.mc1_rev, mathChat.6.player.mc2), na.rm = TRUE),
    mc_mathJitsi = rowMeans(dplyr::select(., mathJitsi.6.player.mc1, mathJitsi.6.player.mc2), na.rm = TRUE),
    mc_hpChat = rowMeans(dplyr::select(., HiddenProfile_Chat.3.player.mc1, HiddenProfile_Chat.3.player.mc2), na.rm = TRUE),
    mc_hpJitsi = rowMeans(dplyr::select(., HiddenProfile_Jitsi.3.player.mc1, HiddenProfile_Jitsi.3.player.mc2), na.rm = TRUE),

    int_mathChat = rowMeans(dplyr::select(., mathChat.6.player.int1_rev, mathChat.6.player.int2, mathChat.6.player.int3), na.rm = TRUE),
    int_mathJitsi = rowMeans(dplyr::select(., mathJitsi.6.player.int1, mathJitsi.6.player.int2, mathJitsi.6.player.int3), na.rm = TRUE),
    int_hpChat = rowMeans(dplyr::select(., HiddenProfile_Chat.3.player.int1, HiddenProfile_Chat.3.player.int2, HiddenProfile_Chat.3.player.int3), na.rm = TRUE),
    int_hpJitsi = rowMeans(dplyr::select(., HiddenProfile_Jitsi.3.player.int1, HiddenProfile_Jitsi.3.player.int2, HiddenProfile_Jitsi.3.player.int3), na.rm = TRUE),

    cg_mathChat = rowMeans(dplyr::select(., mathChat.6.player.cg1_rev, mathChat.6.player.cg2, mathChat.6.player.cg3_rev, 
                                  mathChat.6.player.cg4, mathChat.6.player.cg5_rev, mathChat.6.player.cg6), na.rm = TRUE),
    cg_mathJitsi = rowMeans(dplyr::select(., starts_with("mathJitsi.6.player.cg")), na.rm = TRUE),
    cg_hpChat = rowMeans(dplyr::select(., starts_with("HiddenProfile_Chat.3.player.cg")), na.rm = TRUE),
    cg_hpJitsi = rowMeans(dplyr::select(., starts_with("HiddenProfile_Jitsi.3.player.cg")), na.rm = TRUE)
  )

# MC scale in long format with readable treatment labels, then a boxplot
mc_long <- data %>%
  dplyr::select(mc_mathChat, mc_mathJitsi, mc_hpChat, mc_hpJitsi) %>%
  pivot_longer(everything(), names_to = "Treatment", values_to = "Score") %>%
  mutate(Treatment = dplyr::recode(
    Treatment,
    !!!c(mc_mathChat = "Math – Chat",
         mc_mathJitsi = "Math – Jitsi",
         mc_hpChat = "HiddenProfile – Chat",
         mc_hpJitsi = "HiddenProfile – Jitsi")
  ))

ggplot(mc_long, aes(x = Treatment, y = Score)) +
  geom_boxplot(alpha = 0.7, fill = "skyblue") +
  theme_minimal() +
  ylim(1, 7) +
  labs(
    title = "Means for Coordination (MC) – nach Treatment",
    x = NULL,
    y = "Skalenwert (1–7)"
  )
## Warning: Removed 240 rows containing non-finite outside the scale range
## (`stat_boxplot()`).

# INT scale in long format with readable treatment labels, then a boxplot
int_long <- data %>%
  dplyr::select(int_mathChat, int_mathJitsi, int_hpChat, int_hpJitsi) %>%
  pivot_longer(everything(), names_to = "Treatment", values_to = "Score") %>%
  mutate(Treatment = dplyr::recode(
    Treatment,
    !!!c(int_mathChat = "Math – Chat",
         int_mathJitsi = "Math – Jitsi",
         int_hpChat = "HiddenProfile – Chat",
         int_hpJitsi = "HiddenProfile – Jitsi")
  ))

ggplot(int_long, aes(x = Treatment, y = Score)) +
  geom_boxplot(alpha = 0.7, fill = "orchid") +
  theme_minimal() +
  ylim(1, 7) +
  labs(
    title = "Team Member Interdependence (INT) – nach Treatment",
    x = NULL,
    y = "Skalenwert (1–7)"
  )
## Warning: Removed 240 rows containing non-finite outside the scale range
## (`stat_boxplot()`).

# CG scale in long format with readable treatment labels, then a boxplot
cg_long <- data %>%
  dplyr::select(cg_mathChat, cg_mathJitsi, cg_hpChat, cg_hpJitsi) %>%
  pivot_longer(everything(), names_to = "Treatment", values_to = "Score") %>%
  mutate(Treatment = dplyr::recode(
    Treatment,
    !!!c(cg_mathChat = "Math – Chat",
         cg_mathJitsi = "Math – Jitsi",
         cg_hpChat = "HiddenProfile – Chat",
         cg_hpJitsi = "HiddenProfile – Jitsi")
  ))

ggplot(cg_long, aes(x = Treatment, y = Score)) +
  geom_boxplot(alpha = 0.7, fill = "seagreen3") +
  theme_minimal() +
  ylim(1, 7) +
  labs(
    title = "Common Goal (CG) – nach Treatment",
    x = NULL,
    y = "Skalenwert (1–7)"
  )
## Warning: Removed 240 rows containing non-finite outside the scale range
## (`stat_boxplot()`).

Manipulation check for difficulty

# Turn a string like "['A', 'O', 'F', 'B']" into a plain character vector
parse_order_string <- function(s) {
  cleaned <- gsub("\\[|\\]|'|\"", "", s)
  unlist(strsplit(cleaned, ",\\s*"))
}

# Map the one-letter math condition codes to readable difficulty labels;
# unmatched codes pass through unchanged (dplyr::recode semantics)
map_math_difficulty <- function(code) {
  labels <- c(
    "B" = "Easy",
    "A" = "Optimal_Selected",
    "F" = "Optimal_Calibrated",
    "O" = "Hard"
  )
  dplyr::recode(code, !!!labels)
}

# Parse each participant's condition-order strings into list columns:
# math_order gets readable difficulty labels, hp_order is title-cased
data <- data %>%
  mutate(
    math_order = map(
      as.character(participant.condition_order),
      ~ map_chr(parse_order_string(.x), map_math_difficulty)
    ),
    hp_order = map(
      as.character(participant.hp_condition_order),
      ~ str_to_title(parse_order_string(.x))
    )
  )

# MATH TASK: one row per participant x round with the subjective difficulty
# rating (csb) and the difficulty label looked up from the participant's
# math_order list column
math_difficulty_long <- data %>%
  dplyr::select(participant.code, math_order,
         starts_with("mathChat.3.player.csb"),
         starts_with("mathChat.4.player.csb"),
         starts_with("mathChat.5.player.csb"),
         starts_with("mathChat.6.player.csb"),
         starts_with("mathJitsi.3.player.csb"),
         starts_with("mathJitsi.4.player.csb"),
         starts_with("mathJitsi.5.player.csb"),
         starts_with("mathJitsi.6.player.csb")) %>%
  pivot_longer(cols = -c(participant.code, math_order),
               names_to = "var", values_to = "csb") %>%
  mutate(
    # first number in the column name is the round (3-6)
    round = str_extract(var, "\\d+"),
    comm = ifelse(str_detect(var, "Chat"), "Chat", "Jitsi"),
    index = as.integer(round) - 2,  # because math.3 is the first relevant round
    # look up the difficulty label at position `index` of this row's order
    difficulty = map2_chr(math_order, index, ~ .x[.y])
  ) %>%
  drop_na(csb)

# HP TASK: same reshaping for the hidden-profile rounds; only the csb1/csb2
# columns are pivoted (via matches), other HiddenProfile_* columns are dropped
# implicitly by pivot_longer's cols selection
hp_difficulty_long <- data %>%
  dplyr::select(participant.code, hp_order,
         starts_with("HiddenProfile_Chat."),
         starts_with("HiddenProfile_Jitsi.")) %>%
  pivot_longer(cols = matches("HiddenProfile_.*\\.player\\.csb[12]"),
               names_to = "var", values_to = "csb") %>%
  mutate(
    # number right after the first dot is the round (1-3)
    round = str_extract(var, "(?<=\\.)\\d+"),
    comm = ifelse(str_detect(var, "Chat"), "Chat", "Jitsi"),
    index = as.integer(round),  # here round number equals the order index
    difficulty = map2_chr(hp_order, index, ~ .x[.y])
  ) %>%
  drop_na(csb)

# Dodged boxplots of subjective difficulty per difficulty level, by medium
ggplot(math_difficulty_long, aes(x = difficulty, y = csb, fill = comm)) +
  geom_boxplot(position = position_dodge(width = 0.75)) +
  theme_minimal() +
  labs(title = "Math Task", x = "Difficulty", y = "Subjective Difficulty")

ggplot(hp_difficulty_long, aes(x = difficulty, y = csb, fill = comm)) +
  geom_boxplot(position = position_dodge(width = 0.75)) +
  theme_minimal() +
  labs(title = "Hidden Profile Task", x = "Difficulty", y = "Subjective Difficulty")

Internal consistency check for flow construct and flow score calculation

# Order-parsing helper (re-defined so this section can run standalone):
# converts "['A', 'O', 'F', 'B']" into a plain character vector
parse_order_string <- function(s) {
  cleaned <- gsub("\\[|\\]|'|\"", "", s)
  unlist(strsplit(cleaned, ",\\s*"))
}

# Extract the condition orders as raw code vectors.
# NOTE(review): this overwrites the math_order created earlier, which had been
# mapped to difficulty labels - the remap below expects the raw codes
# ("A"/"O"/"F"/"B"), so the overwrite looks intentional; confirm.
data <- data %>%
  mutate(
    math_order = map(participant.condition_order, parse_order_string),
    hp_order   = map(participant.hp_condition_order, parse_order_string)
  )

# Remap fss items from round number -> difficulty code for one participant.
#   df             one-row data frame with this participant's raw columns
#   prefix         column prefix, e.g. "mathChat" or "HiddenProfile_Jitsi"
#   rounds         round numbers in play order
#   difficulty_map difficulty code per round (parallel to rounds)
# Returns a one-row tibble with columns <prefix>.<code>.player.fssNN.
remap_fss_items <- function(df, prefix, rounds, difficulty_map) {
  out <- list()
  for (i in seq_along(rounds)) {
    r <- rounds[i]
    diff_code <- difficulty_map[i]
    # Only fss01-fss09 are remapped here; aggregate_flow later requests
    # fss01-fss10 - NOTE(review): confirm the intended number of items.
    for (j in 1:9) {
      old_name <- sprintf("%s.%d.player.fss%02d", prefix, r, j)
      new_name <- sprintf("%s.%s.player.fss%02d", prefix, diff_code, j)
      # [1]: df is assumed to hold a single row (called per participant)
      out[[new_name]] <- if (old_name %in% names(df)) df[[old_name]][1] else NA
    }
  }
  # Return a single row with many columns
  return(as_tibble(out))
}

# Column names of all math-related items
math_cols <- names(data)[startsWith(names(data), "math")]

# Remap the math data: per participant, build a one-row tibble of fss items
# keyed by difficulty code instead of round number (rounds 3-6)
math_data <- data %>%
  mutate(math_items = pmap(
    # first pmap argument is the order list column, the rest are the raw columns
    c(list(math_order), dplyr::select(., all_of(math_cols))),
    function(order, ...) {
      df <- tibble(...)
      bind_cols(
        remap_fss_items(df, "mathJitsi", 3:6, order),
        remap_fss_items(df, "mathChat", 3:6, order)
      )
    }
  ))

# Column names of all Hidden Profile items
hp_cols <- names(data)[startsWith(names(data), "HiddenProfile_")]

# Remap the HP data the same way (rounds 1-3, hp_order supplies the codes)
hp_data <- data %>%
  mutate(hp_items = pmap(
    c(list(hp_order), dplyr::select(., all_of(hp_cols))),
    function(order, ...) {
      df <- tibble(...)
      bind_cols(
        remap_fss_items(df, "HiddenProfile_Jitsi", 1:3, order),
        remap_fss_items(df, "HiddenProfile_Chat", 1:3, order)
      )
    }
  ))

# Compute Cronbach's alpha and mean flow score per difficulty x medium cell.
#   df           data frame holding the remapped fss items (one row per participant)
#   prefix       task prefix: "math" or "HiddenProfile"
#   difficulties difficulty codes to iterate over, e.g. c("A", "O", "F", "B")
# Returns a data.frame with one mean-score column per cell (fss_<scale_name>);
# the alpha values are printed as a side effect.
aggregate_flow <- function(df, prefix, difficulties) {
  results <- list()

  cat("\n=== Cronbach's Alpha für", prefix, "===\n")

  for (d in difficulties) {
    for (comm in c("Chat", "Jitsi")) {
      # HiddenProfile columns carry an underscore before the medium
      full_prefix <- if (prefix == "HiddenProfile") {
        paste0(prefix, "_", comm)
      } else {
        paste0(prefix, comm)
      }

      # Hoisted: the scale label is needed in every branch below
      scale_name <- paste0(prefix, "_", d, "_", comm)

      # NOTE(review): items 1:10 are requested here, but remap_fss_items only
      # generates fss01-fss09 - confirm whether the scale has 9 or 10 items.
      items <- sprintf("%s.%s.player.fss%02d", full_prefix, d, 1:10)
      valid_items <- items[items %in% names(df)]

      if (length(valid_items) >= 2) {
        item_df <- df[valid_items]
        alpha_val <- tryCatch(
          psych::alpha(item_df)$total$raw_alpha,
          error = function(e) NA
        )
        scale_mean <- rowMeans(item_df, na.rm = TRUE)

        if (is.na(alpha_val)) {
          cat(sprintf("%-20s: α = NA\n", scale_name))
        } else {
          cat(sprintf("%-20s: α = %.3f\n", scale_name, alpha_val))
        }
      } else {
        # Too few items for a reliability estimate: NA alpha, NA scores
        alpha_val <- NA
        scale_mean <- rep(NA_real_, nrow(df))
        cat(sprintf("%-20s: α = NA (nur %d Items)\n", scale_name, length(valid_items)))
      }

      results[[paste0("fss_", scale_name)]] <- scale_mean
    }
  }
  as.data.frame(results)
}


# Stack the per-participant remapped item rows and attach the participant code
full_items <- bind_cols(
  data["participant.code"],
  bind_rows(math_data$math_items),
  bind_rows(hp_data$hp_items)
)

# Compute the math flow scales
math_scores <- aggregate_flow(full_items, "math", c("A", "O", "F", "B"))
## 
## === Cronbach's Alpha für math ===
## math_A_Chat         : α = 0.880
## math_A_Jitsi        : α = 0.906
## math_O_Chat         : α = 0.832
## math_O_Jitsi        : α = 0.873
## math_F_Chat         : α = 0.860
## math_F_Jitsi        : α = 0.883
## math_B_Chat         : α = 0.801
## math_B_Jitsi        : α = 0.900
# Hidden Profile scales use the difficulty codes EASY/MED/HARD (see hp_order)
hp_scores   <- aggregate_flow(full_items, "HiddenProfile", c("EASY", "MED", "HARD"))
## 
## === Cronbach's Alpha für HiddenProfile ===
## HiddenProfile_EASY_Chat: α = 0.891
## HiddenProfile_EASY_Jitsi: α = 0.831
## HiddenProfile_MED_Chat: α = 0.914
## HiddenProfile_MED_Jitsi: α = 0.897
## HiddenProfile_HARD_Chat: α = 0.938
## HiddenProfile_HARD_Jitsi: α = 0.898
# Final dataset containing all flow scales
flow_scores <- bind_cols(full_items["participant.code"], math_scores, hp_scores)

# Wide -> long, split scale_name into task/difficulty/comm, and keep only
# the cells the participant actually played (drop NaN/NA flow scores)
flow_scores_long <- flow_scores %>%
  pivot_longer(
    cols = -participant.code,
    names_to = "scale_name",
    values_to = "flow_score"
  ) %>%
  separate(scale_name, into = c("fss", "task", "difficulty", "comm"), sep = "_") %>%
  dplyr::select(-fss) %>%
  filter(!is.na(flow_score))

# NOTE(review): re-derives math_order/hp_order exactly as in the earlier
# mutate - this call appears redundant and could be removed after verification.
data <- data %>%
  mutate(
    math_order = map(participant.condition_order, parse_order_string),
    hp_order   = map(participant.hp_condition_order, parse_order_string)
  )

# Attach the math condition order (renamed within the join's select)
flow_scores_long <- flow_scores_long %>%
  left_join(
    data %>% dplyr::select(participant.code,
                           condition_order = participant.condition_order),
    by = "participant.code"
  )

# Attach the hidden-profile condition order
flow_scores_long <- flow_scores_long %>%
  left_join(
    data %>% dplyr::select(participant.code,
                           hp_condition_order = participant.hp_condition_order),
    by = "participant.code"
  )

# Build a team identifier from session code and group id (if not done already)
data <- data %>%
  mutate(team_id = paste0(session.code, "_", Intro.1.group.custom_group_id))

# Attach the team id to the long flow data
flow_scores_long <- flow_scores_long %>%
  left_join(
    data %>% dplyr::select(participant.code, team_id),
    by = "participant.code"
  )

# Harmonise task and difficulty spelling for the later filters
flow_scores_long <- flow_scores_long %>%
  mutate(
    task = dplyr::recode(
      task,
      !!!c("math" = "Math", "HiddenProfile" = "HP")
    ),
    difficulty = dplyr::recode(
      difficulty,
      !!!c("B" = "Easy",
           "A" = "Optimal_Selected",
           "F" = "Optimal_Calibrated",
           "O" = "Hard",
           "EASY" = "Easy",
           "MED" = "Medium",
           "HARD" = "Hard")
    )
  )

# From here on, use the long dataset as flow_scores
flow_scores <- flow_scores_long

# Pick the order string that matches each row's task, then drop the originals
flow_scores <- flow_scores %>%
  mutate(
    order = case_when(
      task == "Math" ~ condition_order,
      task == "HP" ~ hp_condition_order,
      .default = NA_character_
    )
  ) %>%
  dplyr::select(-condition_order, -hp_condition_order)

# Boxplot of flow scores per task x medium cell, with raw points overlaid
ggplot(flow_scores, aes(x = interaction(task, comm, sep = " - "), y = flow_score, fill = task)) +
  geom_boxplot(alpha = 0.7, outlier.shape = NA) +
  geom_jitter(width = 0.2, alpha = 0.4, size = 1, color = "black") +
  scale_fill_brewer(palette = "Set2") +
  labs(
    title = "Flow-Scores nach Experimentalbedingung",
    x = "Bedingung (Task - Medium)",
    y = "Flow-Score"
  ) +
  theme_minimal(base_size = 14) +
  theme(legend.position = "none")

Outlier check

# Outlier analysis for the flow scores
# ================================================================================

# 1. Flag outliers per task x medium cell (|z| > 2), using per-verb grouping
flow_scores_outlier <- flow_scores %>%
  mutate(
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    z_score = (flow_score - mean_flow) / sd_flow,
    is_outlier = abs(z_score) > 2,
    outlier_direction = case_when(
      z_score > 2 ~ "high",
      z_score < -2 ~ "low",
      TRUE ~ "normal"
    ),
    .by = c(task, comm)
  )

# 2. Outlier summary per task x medium cell (counts, direction, percentage)
outlier_summary <- flow_scores_outlier %>%
  group_by(task, comm) %>%
  dplyr::summarise(
    n_total = n(),
    n_outliers = sum(is_outlier),
    n_high = sum(outlier_direction == "high"),
    n_low = sum(outlier_direction == "low"),
    pct_outliers = round(mean(is_outlier) * 100, 2),
    mean_flow = round(mean(flow_score, na.rm = TRUE), 3),
    sd_flow = round(sd(flow_score, na.rm = TRUE), 3),
    .groups = "drop"
  )

print("Ausreißer-Zusammenfassung nach Bedingung:")
## [1] "Ausreißer-Zusammenfassung nach Bedingung:"
print(outlier_summary)
## # A tibble: 4 × 9
##   task  comm  n_total n_outliers n_high n_low pct_outliers mean_flow sd_flow
##   <chr> <chr>   <int>      <int>  <int> <int>        <dbl>     <dbl>   <dbl>
## 1 HP    Chat      180          9      0     9         5         5.26    1.22
## 2 HP    Jitsi     180          6      0     6         3.33      5.23    1.13
## 3 Math  Chat      240          7      0     7         2.92      5.20    1.11
## 4 Math  Jitsi     240         13      0    13         5.42      5.39    1.20
# 3. Overall overview across all conditions
total_outliers <- flow_scores_outlier %>%
  dplyr::summarise(
    total_observations = n(),
    total_outliers = sum(is_outlier),
    pct_outliers = round(mean(is_outlier) * 100, 2)
  )

# cat() interprets the "\n" escape; print() showed it literally before
cat("\nGesamtanzahl Ausreißer:\n")
print(total_outliers)
## # A tibble: 1 × 3
##   total_observations total_outliers pct_outliers
##                <int>          <int>        <dbl>
## 1                840             35         4.17
# 4. Details on the flagged outliers, most extreme first
outlier_details <- flow_scores_outlier %>%
  filter(is_outlier) %>%
  dplyr::select(participant.code, task, comm, difficulty, flow_score, z_score, outlier_direction) %>%
  arrange(desc(abs(z_score)))

# cat() interprets the "\n" escape; print() showed it literally before
cat("\nTop 10 extremste Ausreißer:\n")
print(head(outlier_details, 10))
## # A tibble: 10 × 7
##    participant.code task  comm  difficulty flow_score z_score outlier_direction
##    <chr>            <chr> <chr> <chr>           <dbl>   <dbl> <chr>            
##  1 15cwmo8e         Math  Chat  Hard             1.56   -3.20 low              
##  2 bujdmiwp         Math  Jitsi Easy             1.67   -3.11 low              
##  3 6uaqx4an         Math  Jitsi Hard             1.78   -3.01 low              
##  4 pnnxousf         HP    Chat  Hard             1.78   -3.01 low              
##  5 52urcwo7         Math  Jitsi Hard             2      -2.82 low              
##  6 95wh6h25         Math  Jitsi Hard             2.11   -2.72 low              
##  7 0vde4tkj         HP    Chat  Medium           2.11   -2.72 low              
##  8 nv5jk8z2         HP    Chat  Hard             2.33   -2.53 low              
##  9 b7tdlraf         Math  Jitsi Hard             2.33   -2.53 low              
## 10 15cwmo8e         HP    Chat  Easy             2.33   -2.53 low
# 5. Visualise the outliers
library(ggplot2)

# Boxplot with the flagged outliers highlighted as coloured points
p1 <- ggplot(flow_scores_outlier, aes(x = interaction(task, comm), y = flow_score)) +
  geom_boxplot(aes(fill = task), alpha = 0.7) +
  geom_point(
    data = filter(flow_scores_outlier, is_outlier),
    aes(color = outlier_direction),
    size = 3
  ) +
  scale_color_manual(values = c(high = "red", low = "blue")) +
  labs(
    title = "Flow-Scores mit markierten Ausreißern (>2 SD)",
    x = "Bedingung",
    y = "Flow Score",
    color = "Ausreißer-Typ"
  ) +
  theme_minimal()

print(p1)

# Z-score distribution per cell with the +/-2 SD cutoffs marked
p2 <- ggplot(flow_scores_outlier, aes(x = z_score)) +
  geom_histogram(bins = 30, alpha = 0.7, fill = "lightblue", color = "black") +
  geom_vline(xintercept = c(-2, 2), linetype = "dashed", color = "red") +
  facet_wrap(~ interaction(task, comm)) +
  theme_minimal() +
  labs(
    title = "Z-Score Verteilung der Flow-Werte",
    subtitle = "Rote Linien = ±2 SD Grenze",
    x = "Z-Score",
    y = "Häufigkeit"
  )

print(p2)

# 6. Outliers broken down by difficulty level and task
outlier_by_difficulty <- flow_scores_outlier %>%
  group_by(difficulty, task) %>%
  dplyr::summarise(
    n_total = n(),
    n_outliers = sum(is_outlier),
    pct_outliers = round(mean(is_outlier) * 100, 2),
    outlier_types = paste(
      "High:", sum(outlier_direction == "high"),
      "Low:", sum(outlier_direction == "low")
    ),
    .groups = "drop"
  )

# cat() interprets the "\n" escape; print() showed it literally before
cat("\nAusreißer nach Schwierigkeitsstufe:\n")
print(outlier_by_difficulty)
## # A tibble: 7 × 6
##   difficulty         task  n_total n_outliers pct_outliers outlier_types  
##   <chr>              <chr>   <int>      <int>        <dbl> <chr>          
## 1 Easy               HP        120          2         1.67 High: 0 Low: 2 
## 2 Easy               Math      120          4         3.33 High: 0 Low: 4 
## 3 Hard               HP        120          8         6.67 High: 0 Low: 8 
## 4 Hard               Math      120         14        11.7  High: 0 Low: 14
## 5 Medium             HP        120          5         4.17 High: 0 Low: 5 
## 6 Optimal_Calibrated Math      120          1         0.83 High: 0 Low: 1 
## 7 Optimal_Selected   Math      120          1         0.83 High: 0 Low: 1
# 7. Check whether particular participants are flagged repeatedly
participant_outlier_freq <- flow_scores_outlier %>%
  group_by(participant.code) %>%
  dplyr::summarise(
    n_measurements = n(),
    n_outlier = sum(is_outlier),
    pct_outlier = round(mean(is_outlier) * 100, 2)
  ) %>%
  filter(n_outlier > 0) %>%
  arrange(desc(n_outlier))

# cat() interprets the "\n" escape; print() showed it literally before
cat("\nTeilnehmer mit Ausreißer-Messungen:\n")
print(head(participant_outlier_freq, 10))
## # A tibble: 10 × 4
##    participant.code n_measurements n_outlier pct_outlier
##    <chr>                     <int>     <int>       <dbl>
##  1 6uaqx4an                      7         4        57.1
##  2 95wh6h25                      7         3        42.9
##  3 0vde4tkj                      7         2        28.6
##  4 15cwmo8e                      7         2        28.6
##  5 b7tdlraf                      7         2        28.6
##  6 m4zob5bc                      7         2        28.6
##  7 xhiuzfzo                      7         2        28.6
##  8 29bb1m8o                      7         1        14.3
##  9 52urcwo7                      7         1        14.3
## 10 54jyvztp                      7         1        14.3
# 8. Check team-level outliers (for shared flow)
# Two-stage screen: (1) aggregate individual flow scores to team means
# within each task x comm cell; (2) z-score those team means against the
# grand mean/SD of their task x comm cell and flag |z| > 2 as a
# team-level outlier.
team_flow_outliers <- flow_scores_outlier %>%
  group_by(team_id, task, comm) %>%
  dplyr::summarise(
    team_mean_flow = mean(flow_score, na.rm = TRUE),
    team_sd_flow = sd(flow_score, na.rm = TRUE),
    n_members = n(),
    n_outlier_members = sum(is_outlier),
    .groups = "drop"
  ) %>%
  # Re-group by condition cell so grand mean/SD are computed per cell
  group_by(task, comm) %>%
  mutate(
    grand_mean = mean(team_mean_flow, na.rm = TRUE),
    grand_sd = sd(team_mean_flow, na.rm = TRUE),
    team_z_score = (team_mean_flow - grand_mean) / grand_sd,
    is_team_outlier = abs(team_z_score) > 2
  ) %>%
  # Keep only flagged teams; result stays grouped by task, comm
  filter(is_team_outlier)

# "\n" inside print() is rendered literally (see the captured output line
# below); cat() produces the intended blank line before the heading.
cat("\nTeams mit extremen Flow-Werten:\n")
## [1] "\nTeams mit extremen Flow-Werten:"
print(team_flow_outliers)
## # A tibble: 3 × 11
## # Groups:   task, comm [3]
##   team_id    task  comm  team_mean_flow team_sd_flow n_members n_outlier_members
##   <chr>      <chr> <chr>          <dbl>        <dbl>     <int>             <int>
## 1 89vmiwh4_… HP    Chat            3.20        0.921         9                 3
## 2 89vmiwh4_… Math  Chat            3.83        0.954        12                 2
## 3 xdmgzj57_… HP    Jitsi           6.57        0.358         9                 0
## # ℹ 4 more variables: grand_mean <dbl>, grand_sd <dbl>, team_z_score <dbl>,
## #   is_team_outlier <lgl>
# Keep only non-outlier measurements and drop the helper columns that
# were created during the outlier screening (optional clean-up).
flow_clean <- flow_scores_outlier %>%
  dplyr::filter(!is_outlier) %>%
  dplyr::select(-c(mean_flow, sd_flow, z_score, is_outlier, outlier_direction))

Flow proneness consistency check and score calculation

# Flow-proneness items: work (fpw), household (fph), leisure (fpl)
flowp_items <- data %>%
  dplyr::select(participant.code,
         starts_with("Outro.1.player.fpw"),
         starts_with("Outro.1.player.fph"),
         starts_with("Outro.1.player.fpl"))

# Reverse-code the first item of each dimension on the 5-point scale
# (6 - x maps 1<->5, 2<->4). across() replaces three hand-written
# per-column mutate lines with one declarative transformation.
flowp_items <- flowp_items %>%
  mutate(
    across(
      all_of(c("Outro.1.player.fpl1",
               "Outro.1.player.fph1",
               "Outro.1.player.fpw1")),
      ~ 6 - .x
    )
  )

# Prüfe interne Konsistenz pro Dimension
# Internal consistency (Cronbach's alpha) of the 7 work-context items;
# the printed report below shows raw_alpha = 0.70.
alpha_work <- psych::alpha(flowp_items %>% dplyr::select(starts_with("Outro.1.player.fpw"))); alpha_work
## 
## Reliability analysis   
## Call: psych::alpha(x = flowp_items %>% dplyr::select(starts_with("Outro.1.player.fpw")))
## 
##   raw_alpha std.alpha G6(smc) average_r S/N  ase mean   sd median_r
##        0.7      0.69     0.7      0.24 2.3 0.04  3.4 0.52     0.22
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.61   0.7  0.78
## Duhachek  0.62   0.7  0.78
## 
##  Reliability if an item is dropped:
##                     raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r
## Outro.1.player.fpw1      0.67      0.66    0.66      0.24 1.9    0.045 0.025
## Outro.1.player.fpw2      0.70      0.70    0.71      0.28 2.3    0.041 0.024
## Outro.1.player.fpw3      0.63      0.62    0.63      0.22 1.7    0.050 0.023
## Outro.1.player.fpw4      0.72      0.72    0.72      0.30 2.5    0.038 0.017
## Outro.1.player.fpw5      0.62      0.61    0.61      0.21 1.6    0.053 0.018
## Outro.1.player.fpw6      0.64      0.63    0.61      0.22 1.7    0.050 0.016
## Outro.1.player.fpw7      0.67      0.66    0.67      0.24 1.9    0.044 0.024
##                     med.r
## Outro.1.player.fpw1  0.22
## Outro.1.player.fpw2  0.28
## Outro.1.player.fpw3  0.20
## Outro.1.player.fpw4  0.28
## Outro.1.player.fpw5  0.20
## Outro.1.player.fpw6  0.22
## Outro.1.player.fpw7  0.20
## 
##  Item statistics 
##                       n raw.r std.r r.cor r.drop mean   sd
## Outro.1.player.fpw1 120  0.60  0.59  0.50   0.41  2.6 0.90
## Outro.1.player.fpw2 120  0.42  0.47  0.29   0.25  3.4 0.70
## Outro.1.player.fpw3 120  0.70  0.69  0.64   0.53  3.8 0.94
## Outro.1.player.fpw4 120  0.39  0.40  0.21   0.17  4.1 0.83
## Outro.1.player.fpw5 120  0.74  0.73  0.71   0.58  3.4 0.96
## Outro.1.player.fpw6 120  0.69  0.69  0.66   0.53  3.3 0.85
## Outro.1.player.fpw7 120  0.60  0.59  0.48   0.40  3.1 0.92
## 
## Non missing response frequency for each item
##                        1    2    3    4    5 miss
## Outro.1.player.fpw1 0.10 0.34 0.41 0.13 0.02    0
## Outro.1.player.fpw2 0.00 0.10 0.47 0.41 0.03    0
## Outro.1.player.fpw3 0.02 0.08 0.24 0.44 0.22    0
## Outro.1.player.fpw4 0.00 0.05 0.13 0.46 0.36    0
## Outro.1.player.fpw5 0.03 0.16 0.32 0.41 0.08    0
## Outro.1.player.fpw6 0.03 0.10 0.46 0.34 0.07    0
## Outro.1.player.fpw7 0.03 0.25 0.37 0.31 0.05    0
# Internal consistency (Cronbach's alpha) of the 7 household-context items;
# the printed report below shows raw_alpha = 0.70.
alpha_household <- psych::alpha(flowp_items %>% dplyr::select(starts_with("Outro.1.player.fph"))); alpha_household
## 
## Reliability analysis   
## Call: psych::alpha(x = flowp_items %>% dplyr::select(starts_with("Outro.1.player.fph")))
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##        0.7      0.71    0.72      0.26 2.5 0.042  3.6 0.59     0.26
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.61   0.7  0.78
## Duhachek  0.62   0.7  0.78
## 
##  Reliability if an item is dropped:
##                     raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r
## Outro.1.player.fph1      0.65      0.67    0.67      0.25 2.0    0.050 0.022
## Outro.1.player.fph2      0.73      0.73    0.73      0.31 2.7    0.037 0.021
## Outro.1.player.fph3      0.65      0.66    0.66      0.24 1.9    0.049 0.027
## Outro.1.player.fph4      0.69      0.70    0.70      0.28 2.3    0.044 0.024
## Outro.1.player.fph5      0.65      0.67    0.68      0.25 2.0    0.050 0.025
## Outro.1.player.fph6      0.65      0.65    0.67      0.24 1.9    0.050 0.024
## Outro.1.player.fph7      0.65      0.67    0.67      0.25 2.0    0.050 0.020
##                     med.r
## Outro.1.player.fph1  0.22
## Outro.1.player.fph2  0.37
## Outro.1.player.fph3  0.20
## Outro.1.player.fph4  0.26
## Outro.1.player.fph5  0.22
## Outro.1.player.fph6  0.22
## Outro.1.player.fph7  0.26
## 
##  Item statistics 
##                       n raw.r std.r r.cor r.drop mean   sd
## Outro.1.player.fph1 120  0.67  0.64  0.57   0.48  2.9 1.09
## Outro.1.player.fph2 120  0.46  0.43  0.25   0.20  3.3 1.15
## Outro.1.player.fph3 120  0.64  0.67  0.61   0.48  4.3 0.88
## Outro.1.player.fph4 120  0.49  0.53  0.42   0.32  4.3 0.82
## Outro.1.player.fph5 120  0.65  0.64  0.56   0.48  3.1 0.96
## Outro.1.player.fph6 120  0.66  0.68  0.61   0.51  4.1 0.88
## Outro.1.player.fph7 120  0.67  0.64  0.58   0.48  3.0 1.10
## 
## Non missing response frequency for each item
##                        1    2    3    4    5 miss
## Outro.1.player.fph1 0.10 0.26 0.30 0.28 0.06    0
## Outro.1.player.fph2 0.05 0.23 0.23 0.32 0.17    0
## Outro.1.player.fph3 0.01 0.05 0.07 0.37 0.50    0
## Outro.1.player.fph4 0.01 0.03 0.10 0.38 0.49    0
## Outro.1.player.fph5 0.02 0.25 0.42 0.22 0.10    0
## Outro.1.player.fph6 0.00 0.07 0.12 0.45 0.35    0
## Outro.1.player.fph7 0.09 0.26 0.32 0.25 0.08    0
# Internal consistency (Cronbach's alpha) of the 7 leisure-context items;
# the printed report below shows raw_alpha = 0.80.
alpha_leisure <- psych::alpha(flowp_items %>% dplyr::select(starts_with("Outro.1.player.fpl"))); alpha_leisure
## 
## Reliability analysis   
## Call: psych::alpha(x = flowp_items %>% dplyr::select(starts_with("Outro.1.player.fpl")))
## 
##   raw_alpha std.alpha G6(smc) average_r S/N   ase mean   sd median_r
##        0.8       0.8     0.8      0.37 4.1 0.028  3.8 0.64     0.33
## 
##     95% confidence boundaries 
##          lower alpha upper
## Feldt     0.74   0.8  0.85
## Duhachek  0.74   0.8  0.85
## 
##  Reliability if an item is dropped:
##                     raw_alpha std.alpha G6(smc) average_r S/N alpha se var.r
## Outro.1.player.fpl1      0.79      0.80    0.78      0.39 3.9    0.029 0.016
## Outro.1.player.fpl2      0.81      0.81    0.80      0.41 4.2    0.027 0.013
## Outro.1.player.fpl3      0.74      0.74    0.73      0.33 2.9    0.037 0.014
## Outro.1.player.fpl4      0.79      0.79    0.78      0.39 3.9    0.030 0.016
## Outro.1.player.fpl5      0.77      0.77    0.77      0.36 3.4    0.033 0.021
## Outro.1.player.fpl6      0.75      0.76    0.74      0.34 3.1    0.035 0.015
## Outro.1.player.fpl7      0.75      0.75    0.74      0.34 3.1    0.035 0.017
##                     med.r
## Outro.1.player.fpl1  0.34
## Outro.1.player.fpl2  0.40
## Outro.1.player.fpl3  0.33
## Outro.1.player.fpl4  0.35
## Outro.1.player.fpl5  0.33
## Outro.1.player.fpl6  0.33
## Outro.1.player.fpl7  0.33
## 
##  Item statistics 
##                       n raw.r std.r r.cor r.drop mean   sd
## Outro.1.player.fpl1 120  0.58  0.59  0.48   0.42  3.5 0.94
## Outro.1.player.fpl2 120  0.54  0.53  0.39   0.35  3.5 1.02
## Outro.1.player.fpl3 120  0.81  0.80  0.79   0.70  3.8 1.01
## Outro.1.player.fpl4 120  0.60  0.60  0.49   0.43  3.9 0.96
## Outro.1.player.fpl5 120  0.69  0.69  0.61   0.55  3.6 0.96
## Outro.1.player.fpl6 120  0.75  0.76  0.72   0.64  3.9 0.96
## Outro.1.player.fpl7 120  0.75  0.77  0.74   0.66  4.2 0.80
## 
## Non missing response frequency for each item
##                        1    2    3    4    5 miss
## Outro.1.player.fpl1 0.03 0.15 0.24 0.49 0.09    0
## Outro.1.player.fpl2 0.04 0.12 0.22 0.47 0.14    0
## Outro.1.player.fpl3 0.03 0.09 0.24 0.39 0.25    0
## Outro.1.player.fpl4 0.02 0.06 0.22 0.38 0.32    0
## Outro.1.player.fpl5 0.01 0.16 0.21 0.47 0.16    0
## Outro.1.player.fpl6 0.03 0.05 0.24 0.41 0.28    0
## Outro.1.player.fpl7 0.01 0.02 0.14 0.44 0.39    0
# Aggregate the items to one mean score per life domain, then average
# the three domain scores into an overall trait score (fp_total).
# pick() selects columns of the current data frame inside mutate().
flow_proneness_scores <- flowp_items %>%
  mutate(
    fp_work = rowMeans(pick(starts_with("Outro.1.player.fpw")), na.rm = TRUE),
    fp_household = rowMeans(pick(starts_with("Outro.1.player.fph")), na.rm = TRUE),
    fp_leisure = rowMeans(pick(starts_with("Outro.1.player.fpl")), na.rm = TRUE)
  ) %>%
  mutate(fp_total = rowMeans(pick(fp_work, fp_household, fp_leisure), na.rm = TRUE)) %>%
  dplyr::select(participant.code, fp_total)

# Attach the trait flow-proneness score to both flow data sets.
flow_scores <- left_join(flow_scores, flow_proneness_scores,
                         by = "participant.code")

flow_clean <- left_join(flow_clean, flow_proneness_scores,
                        by = "participant.code")

Linear mixed model for flow with individual-level variables (level 1) nested within teams (level 2); calculation of goodness-of-fit criteria AIC, BIC, marginal R², and conditional R²

# Extended regression analysis with emmeans post-hoc tests

library(lme4)
library(performance)
library(emmeans)
library(dplyr)
library(tibble)

# ================================================================================
# PART 1: MATH TASK ANALYSIS
# ================================================================================

print("=== MATH TASK REGRESSIONSANALYSE ===")
## [1] "=== MATH TASK REGRESSIONSANALYSE ==="
# Restrict the clean flow data to the Math task
flow_scores_math <- dplyr::filter(flow_clean, task == "Math")

# Prepare the model data: two-level synchronicity factor
# (Jitsi -> "High", otherwise "Low"; reference level "Low") and a
# difficulty factor with "Easy" as the reference level.
model_data_math <- flow_scores_math %>%
  mutate(
    synchronicity = factor(ifelse(comm == "Jitsi", "High", "Low"),
                           levels = c("Low", "High")),
    difficulty = factor(difficulty,
                        levels = c("Easy", "Optimal_Selected",
                                   "Optimal_Calibrated", "Hard"))
  )

# Report sample sizes for the Math-task data set
print(sprintf("Math Task: %d Beobachtungen, %d Teams, %d Teilnehmer",
              nrow(model_data_math),
              dplyr::n_distinct(model_data_math$team_id),
              dplyr::n_distinct(model_data_math$participant.code)))
## [1] "Math Task: 460 Beobachtungen, 40 Teams, 120 Teilnehmer"
# Model 1: main effects of synchronicity and difficulty with random
# intercepts for team and participant.
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- MATH MODELL 1: Haupteffekte ---\n")
## [1] "\n--- MATH MODELL 1: Haupteffekte ---"
model_math_1 <- lmer(
  flow_score ~ synchronicity + difficulty +
    (1 | team_id) + (1 | participant.code),
  data = model_data_math
)
# NOTE(review): the summary output shows a near-zero team_id variance
# (8.9e-08), i.e. a (near-)singular fit for the team random effect.
summary(model_math_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity + difficulty + (1 | team_id) + (1 |  
##     participant.code)
##    Data: model_data_math
## 
## REML criterion at convergence: 1095.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2447 -0.5471  0.0581  0.5265  2.5791 
## 
## Random effects:
##  Groups           Name        Variance  Std.Dev. 
##  participant.code (Intercept) 5.220e-01 0.7224708
##  team_id          (Intercept) 8.920e-08 0.0002987
##  Residual                     3.857e-01 0.6210210
## Number of obs: 460, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                               Estimate Std. Error        df t value Pr(>|t|)
## (Intercept)                    5.62467    0.11410 177.54189  49.297   <2e-16
## synchronicityLow              -0.25295    0.14430 116.22173  -1.753   0.0822
## difficultyOptimal_Selected     0.14887    0.08118 336.19559   1.834   0.0676
## difficultyOptimal_Calibrated   0.13833    0.08133 337.21259   1.701   0.0899
## difficultyHard                -0.75027    0.08415 338.19909  -8.916   <2e-16
##                                 
## (Intercept)                  ***
## synchronicityLow             .  
## difficultyOptimal_Selected   .  
## difficultyOptimal_Calibrated .  
## difficultyHard               ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffO_S dffO_C
## synchrnctyL -0.638                     
## dffcltyOp_S -0.365  0.005              
## dffcltyOp_C -0.367  0.007  0.507       
## diffcltyHrd -0.352  0.006  0.489  0.488
# Model 2: adds the synchronicity x difficulty interaction.
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- MATH MODELL 2: Mit Interaktion ---\n")
## [1] "\n--- MATH MODELL 2: Mit Interaktion ---"
model_math_2 <- lmer(
  flow_score ~ synchronicity * difficulty +
    (1 | team_id) + (1 | participant.code),
  data = model_data_math
)
# NOTE(review): team_id variance is again near zero (2.0e-08) per the
# summary output below — (near-)singular team random effect.
summary(model_math_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity * difficulty + (1 | team_id) + (1 |  
##     participant.code)
##    Data: model_data_math
## 
## REML criterion at convergence: 1097
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.15298 -0.53538  0.04109  0.55685  2.68174 
## 
## Random effects:
##  Groups           Name        Variance  Std.Dev. 
##  participant.code (Intercept) 5.225e-01 0.7228519
##  team_id          (Intercept) 2.009e-08 0.0001418
##  Residual                     3.837e-01 0.6194356
## Number of obs: 460, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                Estimate Std. Error        df
## (Intercept)                                     5.58486    0.12541 242.65218
## synchronicityLow                               -0.17560    0.17559 236.32730
## difficultyOptimal_Selected                      0.25180    0.11602 334.04529
## difficultyOptimal_Calibrated                    0.09756    0.11644 336.09271
## difficultyHard                                 -0.64582    0.11974 335.39334
## synchronicityLow:difficultyOptimal_Selected    -0.20180    0.16202 333.36050
## synchronicityLow:difficultyOptimal_Calibrated   0.08207    0.16232 334.41634
## synchronicityLow:difficultyHard                -0.20627    0.16792 335.30947
##                                               t value Pr(>|t|)    
## (Intercept)                                    44.533  < 2e-16 ***
## synchronicityLow                               -1.000   0.3183    
## difficultyOptimal_Selected                      2.170   0.0307 *  
## difficultyOptimal_Calibrated                    0.838   0.4027    
## difficultyHard                                 -5.394 1.31e-07 ***
## synchronicityLow:difficultyOptimal_Selected    -1.246   0.2138    
## synchronicityLow:difficultyOptimal_Calibrated   0.506   0.6135    
## synchronicityLow:difficultyHard                -1.228   0.2202    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffO_S dffO_C dffclH sL:O_S sL:O_C
## synchrnctyL -0.714                                          
## dffcltyOp_S -0.478  0.341                                   
## dffcltyOp_C -0.481  0.343  0.515                            
## diffcltyHrd -0.461  0.329  0.498  0.496                     
## synchrL:O_S  0.342 -0.469 -0.716 -0.369 -0.357              
## synchrL:O_C  0.345 -0.471 -0.370 -0.717 -0.356  0.508       
## synchrncL:H  0.329 -0.452 -0.355 -0.354 -0.713  0.490  0.489
# Model 3: full model adding trait flow proneness (fp_total) and the
# condition-order covariate.
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- MATH MODELL 3: Vollständiges Modell ---\n")
## [1] "\n--- MATH MODELL 3: Vollständiges Modell ---"
model_math_3 <- lmer(
  flow_score ~ synchronicity * difficulty +
    fp_total + order +
    (1 | team_id) + (1 | participant.code),
  data = model_data_math
)
summary(model_math_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity * difficulty + fp_total + order +  
##     (1 | team_id) + (1 | participant.code)
##    Data: model_data_math
## 
## REML criterion at convergence: 1085.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.98009 -0.52242  0.04769  0.56084  2.71810 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.43989  0.6632  
##  team_id          (Intercept) 0.01676  0.1295  
##  Residual                     0.38332  0.6191  
## Number of obs: 460, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                Estimate Std. Error        df
## (Intercept)                                     3.40305    0.65131 117.56241
## synchronicityLow                               -0.26404    0.17523  73.91508
## difficultyOptimal_Selected                      0.25423    0.11595 334.60287
## difficultyOptimal_Calibrated                    0.10220    0.11638 336.56028
## difficultyHard                                 -0.64703    0.11965 336.14706
## fp_total                                        0.67812    0.18389 112.91233
## order['B', 'F', 'O', 'A']                      -0.44735    0.21955  35.30821
## order['F', 'A', 'B', 'O']                      -0.32052    0.21534  37.04443
## order['O', 'B', 'A', 'F']                      -0.04048    0.20575  35.71921
## synchronicityLow:difficultyOptimal_Selected    -0.20423    0.16193 333.88900
## synchronicityLow:difficultyOptimal_Calibrated   0.07743    0.16224 334.89996
## synchronicityLow:difficultyHard                -0.21083    0.16781 336.00839
##                                               t value Pr(>|t|)    
## (Intercept)                                     5.225 7.64e-07 ***
## synchronicityLow                               -1.507  0.13612    
## difficultyOptimal_Selected                      2.193  0.02903 *  
## difficultyOptimal_Calibrated                    0.878  0.38049    
## difficultyHard                                 -5.408 1.21e-07 ***
## fp_total                                        3.688  0.00035 ***
## order['B', 'F', 'O', 'A']                      -2.038  0.04915 *  
## order['F', 'A', 'B', 'O']                      -1.488  0.14509    
## order['O', 'B', 'A', 'F']                      -0.197  0.84515    
## synchronicityLow:difficultyOptimal_Selected    -1.261  0.20811    
## synchronicityLow:difficultyOptimal_Calibrated   0.477  0.63351    
## synchronicityLow:difficultyHard                -1.256  0.20987    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffO_S dffO_C dffclH fp_ttl o['B''' o['F''' o['O'''
## synchrnctyL -0.102                                                           
## dffcltyOp_S -0.101  0.341                                                    
## dffcltyOp_C -0.108  0.342  0.515                                             
## diffcltyHrd -0.091  0.331  0.498  0.496                                      
## fp_total    -0.954 -0.061  0.008  0.016  0.001                               
## o['B','F''' -0.049  0.109  0.003  0.005  0.009 -0.147                        
## o['F','A'''  0.015  0.153 -0.003 -0.010  0.006 -0.224  0.573                 
## o['O','B''' -0.016  0.037  0.002  0.001  0.006 -0.180  0.574   0.600         
## synchrL:O_S  0.072 -0.469 -0.716 -0.369 -0.357 -0.006 -0.002   0.002  -0.002 
## synchrL:O_C  0.078 -0.470 -0.370 -0.717 -0.356 -0.012 -0.004   0.007  -0.001 
## synchrncL:H  0.074 -0.452 -0.355 -0.354 -0.713 -0.010 -0.004  -0.003  -0.004 
##             sL:O_S sL:O_C
## synchrnctyL              
## dffcltyOp_S              
## dffcltyOp_C              
## diffcltyHrd              
## fp_total                 
## o['B','F'''              
## o['F','A'''              
## o['O','B'''              
## synchrL:O_S              
## synchrL:O_C  0.508       
## synchrncL:H  0.490  0.489
# Omnibus tests for the fixed effects of the full Math model
anova(model_math_3)
# Model comparison via information criteria
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- MATH MODELLVERGLEICH ---\n")
## [1] "\n--- MATH MODELLVERGLEICH ---"
aic_comparison_math <- AIC(model_math_1, model_math_2, model_math_3)
bic_comparison_math <- BIC(model_math_1, model_math_2, model_math_3)
print("AIC Vergleich:")
## [1] "AIC Vergleich:"
print(aic_comparison_math)
##              df      AIC
## model_math_1  8 1111.659
## model_math_2 11 1118.953
## model_math_3 15 1115.559
print("BIC Vergleich:")
## [1] "BIC Vergleich:"
print(bic_comparison_math)
##              df      BIC
## model_math_1  8 1144.709
## model_math_2 11 1164.396
## model_math_3 15 1177.527
# R² comparison (performance::r2): marginal R² = fixed effects only,
# conditional R² = fixed + random effects. The warnings below are
# expected for models 1 and 2, whose team variance is essentially zero.
r2_math_1 <- r2(model_math_1)
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## Random effect variances not available. Returned R2 does not account for random effects.
r2_math_2 <- r2(model_math_2)
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## Random effect variances not available. Returned R2 does not account for random effects.
r2_math_3 <- r2(model_math_3)

# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\nR² Vergleich:\n")
## [1] "\nR² Vergleich:"
print(paste("Modell 1 - Marginal R²:", round(r2_math_1$R2_marginal, 4), 
            "Conditional R²:", round(r2_math_1$R2_conditional, 4)))
## [1] "Modell 1 - Marginal R²: 0.2753 Conditional R²: NA"
print(paste("Modell 2 - Marginal R²:", round(r2_math_2$R2_marginal, 4), 
            "Conditional R²:", round(r2_math_2$R2_conditional, 4)))
## [1] "Modell 2 - Marginal R²: 0.2819 Conditional R²: NA"
print(paste("Modell 3 - Marginal R²:", round(r2_math_3$R2_marginal, 4), 
            "Conditional R²:", round(r2_math_3$R2_conditional, 4)))
## [1] "Modell 3 - Marginal R²: 0.216 Conditional R²: 0.6422"
# ================================================================================
# PART 1b: MATH TASK - EMMEANS POST-HOC TESTS
# ================================================================================

# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n=== MATH TASK - EMMEANS POST-HOC ANALYSEN ===\n")
## [1] "\n=== MATH TASK - EMMEANS POST-HOC ANALYSEN ==="
# Choose the model used for post-hoc tests (based on AIC/BIC)
best_math_model <- model_math_3  # adjustable depending on results

# Between-subjects tests: synchronicity differences within each difficulty
cat("\n--- Between-Subjects Tests: Synchronicity|Difficulty ---\n")
## [1] "\n--- Between-Subjects Tests: Synchronicity|Difficulty ---"
bs_tests_math <- emmeans(best_math_model, specs = pairwise ~ synchronicity|difficulty, adjust = "none")$contrasts %>%
  as_tibble() %>%
  # Benjamini-Hochberg correction applied manually across the contrasts
  mutate(p.adj = p.adjust(p.value, "BH")) %>%
  # Relabel difficulty levels for reporting/plotting
  mutate(difficulty = dplyr::recode(difficulty, 
                                   "Easy" = "Easy",
                                   "Optimal_Selected" = "Optimal\n(Selected)",
                                   "Optimal_Calibrated" = "Optimal\n(Calibrated)",
                                   "Hard" = "Hard")) %>%
  # Round all numeric columns to 4 decimals
  mutate(across(where(is.numeric), ~ round(.x, 4)))

print("Between-Subjects Tests (Synchronicity Vergleiche pro Difficulty):")
## [1] "Between-Subjects Tests (Synchronicity Vergleiche pro Difficulty):"
print(bs_tests_math)
## # A tibble: 4 × 8
##   contrast   difficulty              estimate    SE    df t.ratio p.value  p.adj
##   <fct>      <fct>                      <dbl> <dbl> <dbl>   <dbl>   <dbl>  <dbl>
## 1 High - Low "Easy"                     0.264 0.175  74.2    1.51 0.136   0.182 
## 2 High - Low "Optimal\n(Selected)"      0.468 0.174  72.5    2.69 0.00886 0.0197
## 3 High - Low "Optimal\n(Calibrated)"    0.187 0.174  72.6    1.07 0.287   0.287 
## 4 High - Low "Hard"                     0.475 0.180  81.3    2.64 0.00984 0.0197
# Within-subjects tests: difficulty differences within each synchronicity
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- Within-Subjects Tests: Difficulty|Synchronicity ---\n")
## [1] "\n--- Within-Subjects Tests: Difficulty|Synchronicity ---"
ws_tests_math <- emmeans(best_math_model, specs = pairwise ~ difficulty|synchronicity, adjust = "none")$contrasts %>%
  as_tibble() %>%
  # Benjamini-Hochberg correction across the pairwise contrasts
  mutate(p.adj = p.adjust(p.value, "BH")) %>%
  # Round all numeric columns to 4 decimals
  mutate(across(where(is.numeric), ~ round(.x, 4)))

print("Within-Subjects Tests (Difficulty Vergleiche pro Synchronicity):")
## [1] "Within-Subjects Tests (Difficulty Vergleiche pro Synchronicity):"
print(ws_tests_math)
## # A tibble: 12 × 8
##    contrast         synchronicity estimate    SE    df t.ratio  p.value    p.adj
##    <fct>            <fct>            <dbl> <dbl> <dbl>   <dbl>    <dbl>    <dbl>
##  1 Easy - Optimal_… High           -0.254  0.116  336.  -2.19  2.90e- 2 4.98e- 2
##  2 Easy - Optimal_… High           -0.102  0.116  338.  -0.878 3.81e- 1 4.15e- 1
##  3 Easy - Hard      High            0.647  0.120  337.   5.41  1.22e- 7 2.43e- 7
##  4 Optimal_Selecte… High            0.152  0.114  336.   1.33  1.85e- 1 2.46e- 1
##  5 Optimal_Selecte… High            0.901  0.118  337.   7.63  2.36e-13 9.45e-13
##  6 Optimal_Calibra… High            0.749  0.119  339.   6.32  8.14e-10 1.95e- 9
##  7 Easy - Optimal_… Low            -0.0500 0.113  334.  -0.442 6.59e- 1 6.59e- 1
##  8 Easy - Optimal_… Low            -0.180  0.113  334.  -1.59  1.13e- 1 1.69e- 1
##  9 Easy - Hard      Low             0.858  0.118  337.   7.29  2.23e-12 6.68e-12
## 10 Optimal_Selecte… Low            -0.130  0.113  334.  -1.15  2.52e- 1 3.03e- 1
## 11 Optimal_Selecte… Low             0.908  0.118  337.   7.71  1.38e-13 8.31e-13
## 12 Optimal_Calibra… Low             1.04   0.118  337.   8.82  6.44e-17 7.73e-16
# Estimated marginal means for interpreting the synchronicity x difficulty cells
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- Marginal Means für Math Task ---\n")
## [1] "\n--- Marginal Means für Math Task ---"
marginal_means_math <- emmeans(best_math_model, ~ synchronicity * difficulty) %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
print(marginal_means_math)
## # A tibble: 8 × 7
##   synchronicity difficulty         emmean    SE    df lower.CL upper.CL
##   <fct>         <fct>               <dbl> <dbl> <dbl>    <dbl>    <dbl>
## 1 High          Easy                 5.63 0.125  77.0     5.38     5.88
## 2 Low           Easy                 5.37 0.122  72.3     5.13     5.61
## 3 High          Optimal_Selected     5.89 0.123  73.4     5.64     6.13
## 4 Low           Optimal_Selected     5.42 0.122  72.3     5.18     5.66
## 5 High          Optimal_Calibrated   5.74 0.124  73.5     5.49     5.98
## 6 Low           Optimal_Calibrated   5.55 0.122  72.3     5.31     5.79
## 7 High          Hard                 4.99 0.127  81.3     4.73     5.24
## 8 Low           Hard                 4.51 0.126  82.1     4.26     4.76
# ================================================================================
# PART 2: HIDDEN PROFILE TASK ANALYSIS
# ================================================================================

# "\n" inside print() is rendered literally; cat() gives the intended newlines.
cat("\n\n=== HIDDEN PROFILE TASK REGRESSIONSANALYSE ===\n")
## [1] "\n\n=== HIDDEN PROFILE TASK REGRESSIONSANALYSE ==="
# Restrict to the Hidden Profile (HP) task
flow_scores_hp <- flow_clean %>% 
  filter(task == "HP")

# Prepare model data; the HP task has only three difficulty levels
model_data_hp <- flow_scores_hp %>%
  mutate(
    synchronicity = ifelse(comm == "Jitsi", "High", "Low"),
    synchronicity = factor(synchronicity, levels = c("Low", "High")),
    difficulty = factor(difficulty, levels = c("Easy", "Medium", "Hard"))
  )

# Report sample sizes for the HP-task data set
print(sprintf("HP Task: %d Beobachtungen, %d Teams, %d Teilnehmer", 
              nrow(model_data_hp),
              length(unique(model_data_hp$team_id)),
              length(unique(model_data_hp$participant.code))))
## [1] "HP Task: 345 Beobachtungen, 40 Teams, 120 Teilnehmer"
# HP Model 1: main effects of synchronicity and difficulty with random
# intercepts for team and participant.
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- HP MODELL 1: Haupteffekte ---\n")
## [1] "\n--- HP MODELL 1: Haupteffekte ---"
model_hp_1 <- lmer(
  flow_score ~ synchronicity + difficulty +
    (1 | team_id) + (1 | participant.code),
  data = model_data_hp
)
summary(model_hp_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity + difficulty + (1 | team_id) + (1 |  
##     participant.code)
##    Data: model_data_hp
## 
## REML criterion at convergence: 876.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.49419 -0.58660  0.04169  0.55949  2.24528 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.6027   0.7764  
##  team_id          (Intercept) 0.1383   0.3719  
##  Residual                     0.3822   0.6182  
## Number of obs: 345, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                    Estimate Std. Error         df t value Pr(>|t|)    
## (Intercept)        5.391656   0.145943  45.639159  36.944  < 2e-16 ***
## synchronicityLow   0.035977   0.196163  37.252982   0.183  0.85548    
## difficultyMedium  -0.005226   0.081618 224.101864  -0.064  0.94900    
## difficultyHard    -0.213873   0.082199 223.905076  -2.602  0.00989 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffclM
## synchrnctyL -0.670              
## diffcltyMdm -0.273 -0.003       
## diffcltyHrd -0.269 -0.003  0.489
# HP Model 2: adds the synchronicity x difficulty interaction.
# "\n" inside print() is rendered literally; cat() gives the intended newline.
cat("\n--- HP MODELL 2: Mit Interaktion ---\n")
## [1] "\n--- HP MODELL 2: Mit Interaktion ---"
model_hp_2 <- lmer(
  flow_score ~ synchronicity * difficulty +
    (1 | team_id) + (1 | participant.code),
  data = model_data_hp
)
summary(model_hp_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity * difficulty + (1 | team_id) + (1 |  
##     participant.code)
##    Data: model_data_hp
## 
## REML criterion at convergence: 878.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.56665 -0.58281  0.05367  0.56355  2.31699 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.6021   0.7759  
##  team_id          (Intercept) 0.1376   0.3710  
##  Residual                     0.3829   0.6188  
## Number of obs: 345, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                    Estimate Std. Error        df t value
## (Intercept)                         5.44815    0.15264  54.57755  35.693
## synchronicityLow                   -0.07949    0.21663  55.30888  -0.367
## difficultyMedium                   -0.07645    0.11444 221.14185  -0.668
## difficultyHard                     -0.31791    0.11588 221.82377  -2.743
## synchronicityLow:difficultyMedium   0.14535    0.16341 222.19588   0.889
## synchronicityLow:difficultyHard     0.21013    0.16455 221.96750   1.277
##                                   Pr(>|t|)    
## (Intercept)                        < 2e-16 ***
## synchronicityLow                   0.71508    
## difficultyMedium                   0.50480    
## difficultyHard                     0.00658 ** 
## synchronicityLow:difficultyMedium  0.37470    
## synchronicityLow:difficultyHard    0.20295    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffclM dffclH synL:M
## synchrnctyL -0.705                            
## diffcltyMdm -0.365  0.257                     
## diffcltyHrd -0.361  0.254  0.486              
## synchrncL:M  0.256 -0.370 -0.700 -0.340       
## synchrncL:H  0.254 -0.366 -0.342 -0.704  0.490
# Model 3: full model — additionally adjust for flow proneness (fp_total)
# and condition order as fixed-effect covariates
print("\n--- HP MODELL 3: Vollständiges Modell ---")
## [1] "\n--- HP MODELL 3: Vollständiges Modell ---"
model_hp_3 <- lmer(
  flow_score ~ synchronicity * difficulty + 
    fp_total + order +
    (1 | team_id) + (1 | participant.code),
  data = model_data_hp
)
summary(model_hp_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ synchronicity * difficulty + fp_total + order +  
##     (1 | team_id) + (1 | participant.code)
##    Data: model_data_hp
## 
## REML criterion at convergence: 875.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.60631 -0.56416  0.03455  0.55765  2.33138 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.5724   0.7566  
##  team_id          (Intercept) 0.1455   0.3814  
##  Residual                     0.3829   0.6188  
## Number of obs: 345, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                    Estimate Std. Error        df t value
## (Intercept)                         3.66401    0.78062 114.25462   4.694
## synchronicityLow                   -0.09383    0.21758  52.37131  -0.431
## difficultyMedium                   -0.07767    0.11444 221.16556  -0.679
## difficultyHard                     -0.32112    0.11588 221.82508  -2.771
## fp_total                            0.52230    0.21700 110.02701   2.407
## order['HARD', 'EASY', 'MED']       -0.24796    0.23602  36.18122  -1.051
## order['MED', 'HARD', 'EASY']       -0.03779    0.24353  35.61169  -0.155
## synchronicityLow:difficultyMedium   0.15165    0.16341 222.20104   0.928
## synchronicityLow:difficultyHard     0.21501    0.16455 221.97312   1.307
##                                   Pr(>|t|)    
## (Intercept)                       7.51e-06 ***
## synchronicityLow                   0.66805    
## difficultyMedium                   0.49803    
## difficultyHard                     0.00606 ** 
## fp_total                           0.01775 *  
## order['HARD', 'EASY', 'MED']       0.30042    
## order['MED', 'HARD', 'EASY']       0.87754    
## synchronicityLow:difficultyMedium  0.35442    
## synchronicityLow:difficultyHard    0.19269    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) synchL dffclM dffclH fp_ttl o['H'' o['M'' synL:M
## synchrnctyL -0.097                                                 
## diffcltyMdm -0.066  0.257                                          
## diffcltyHrd -0.062  0.253  0.486                                   
## fp_total    -0.971 -0.048 -0.005 -0.009                            
## o['HARD',''  0.004 -0.007 -0.003  0.003 -0.114                     
## o['MED','H' -0.025  0.096  0.000 -0.006 -0.097  0.359              
## synchrncL:M  0.037 -0.369 -0.700 -0.341  0.014 -0.006 -0.001       
## synchrncL:H  0.037 -0.364 -0.342 -0.704  0.012  0.001  0.003  0.490
# ANOVA table for model 3 (output not captured in this transcript)
anova(model_hp_3)
# Model comparison: information criteria and R-squared across the three
# HP models (both AIC and BIC favor the simplest model, model_hp_1)
print("\n--- HP MODELLVERGLEICH ---")
## [1] "\n--- HP MODELLVERGLEICH ---"
aic_comparison_hp <- AIC(model_hp_1, model_hp_2, model_hp_3)
bic_comparison_hp <- BIC(model_hp_1, model_hp_2, model_hp_3)
print("AIC Vergleich:")
## [1] "AIC Vergleich:"
print(aic_comparison_hp)
##            df      AIC
## model_hp_1  7 890.1049
## model_hp_2  9 896.2130
## model_hp_3 12 899.1458
print("BIC Vergleich:")
## [1] "BIC Vergleich:"
print(bic_comparison_hp)
##            df      BIC
## model_hp_1  7 917.0097
## model_hp_2  9 930.8049
## model_hp_3 12 945.2684
# R-squared comparison (marginal = fixed effects only; conditional = fixed +
# random effects). NOTE(review): r2() is from the performance package — not
# loaded in the visible header; confirm it is attached earlier in the file.
r2_hp_1 <- r2(model_hp_1)
r2_hp_2 <- r2(model_hp_2)
r2_hp_3 <- r2(model_hp_3)

print("\nR² Vergleich:")
## [1] "\nR² Vergleich:"
print(paste("Modell 1 - Marginal R²:", round(r2_hp_1$R2_marginal, 4), 
            "Conditional R²:", round(r2_hp_1$R2_conditional, 4)))
## [1] "Modell 1 - Marginal R²: 0.0089 Conditional R²: 0.6628"
print(paste("Modell 2 - Marginal R²:", round(r2_hp_2$R2_marginal, 4), 
            "Conditional R²:", round(r2_hp_2$R2_conditional, 4)))
## [1] "Modell 2 - Marginal R²: 0.0107 Conditional R²: 0.6626"
print(paste("Modell 3 - Marginal R²:", round(r2_hp_3$R2_marginal, 4), 
            "Conditional R²:", round(r2_hp_3$R2_conditional, 4)))
## [1] "Modell 3 - Marginal R²: 0.0493 Conditional R²: 0.6693"
# ================================================================================
# PART 2b: HIDDEN PROFILE TASK - EMMEANS POST-HOC TESTS
# ================================================================================

print("\n=== HIDDEN PROFILE TASK - EMMEANS POST-HOC ANALYSEN ===")
## [1] "\n=== HIDDEN PROFILE TASK - EMMEANS POST-HOC ANALYSEN ==="
# Choose the model used for the post-hoc tests
best_hp_model <- model_hp_3  # Adjustable depending on results

# Between-subjects tests: synchronicity contrast within each difficulty level.
# emmeans' own adjustment is disabled (adjust = "none") so that a single
# Benjamini-Hochberg correction can be applied across all three contrasts below.
print("\n--- Between-Subjects Tests: Synchronicity|Difficulty ---")
## [1] "\n--- Between-Subjects Tests: Synchronicity|Difficulty ---"
bs_tests_hp <- emmeans(best_hp_model, specs = pairwise ~ synchronicity|difficulty, adjust = "none")$contrasts %>%
  as_tibble() %>%
  # Manual Benjamini-Hochberg (FDR) correction across all contrasts
  mutate(p.adj = p.adjust(p.value, "BH")) %>%
  # Round every numeric column to 4 decimals
  mutate(across(where(is.numeric), ~ round(.x, 4)))

print("Between-Subjects Tests (Synchronicity Vergleiche pro Difficulty):")
## [1] "Between-Subjects Tests (Synchronicity Vergleiche pro Difficulty):"
print(bs_tests_hp)
## # A tibble: 3 × 8
##   contrast   difficulty estimate    SE    df t.ratio p.value p.adj
##   <fct>      <fct>         <dbl> <dbl> <dbl>   <dbl>   <dbl> <dbl>
## 1 High - Low Easy         0.0938 0.218  52.8   0.431   0.668 0.793
## 2 High - Low Medium      -0.0578 0.219  53.8  -0.264   0.793 0.793
## 3 High - Low Hard        -0.121  0.220  54.9  -0.551   0.584 0.793
# Within-subjects tests: pairwise difficulty contrasts within each
# synchronicity condition, again with a single manual BH correction
# across all six contrasts.
print("\n--- Within-Subjects Tests: Difficulty|Synchronicity ---")
## [1] "\n--- Within-Subjects Tests: Difficulty|Synchronicity ---"
ws_tests_hp <- emmeans(best_hp_model, specs = pairwise ~ difficulty|synchronicity, adjust = "none")$contrasts %>%
  as_tibble() %>%
  # Manual Benjamini-Hochberg (FDR) correction across all contrasts
  mutate(p.adj = p.adjust(p.value, "BH")) %>%
  # Round every numeric column to 4 decimals
  mutate(across(where(is.numeric), ~ round(.x, 4)))

print("Within-Subjects Tests (Difficulty Vergleiche pro Synchronicity):")
## [1] "Within-Subjects Tests (Difficulty Vergleiche pro Synchronicity):"
print(ws_tests_hp)
## # A tibble: 6 × 8
##   contrast      synchronicity estimate    SE    df t.ratio p.value  p.adj
##   <fct>         <fct>            <dbl> <dbl> <dbl>   <dbl>   <dbl>  <dbl>
## 1 Easy - Medium High            0.0777 0.114  223.   0.679 0.498   0.527 
## 2 Easy - Hard   High            0.321  0.116  223.   2.77  0.00607 0.0364
## 3 Medium - Hard High            0.243  0.117  223.   2.08  0.0382  0.115 
## 4 Easy - Medium Low            -0.0740 0.117  225.  -0.634 0.527   0.527 
## 5 Easy - Hard   Low             0.106  0.117  224.   0.908 0.365   0.527 
## 6 Medium - Hard Low             0.180  0.118  224.   1.53  0.127   0.254
# Estimated marginal means (cell means per synchronicity x difficulty)
# to aid interpretation of the contrasts above; also reused by the plots below
print("\n--- Marginal Means für HP Task ---")
## [1] "\n--- Marginal Means für HP Task ---"
marginal_means_hp <- emmeans(best_hp_model, ~ synchronicity * difficulty) %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
print(marginal_means_hp)
## # A tibble: 6 × 7
##   synchronicity difficulty emmean    SE    df lower.CL upper.CL
##   <fct>         <fct>       <dbl> <dbl> <dbl>    <dbl>    <dbl>
## 1 High          Easy         5.44 0.154  51.7     5.13     5.75
## 2 Low           Easy         5.35 0.158  52.6     5.03     5.66
## 3 High          Medium       5.36 0.155  53.0     5.05     5.67
## 4 Low           Medium       5.42 0.158  53.1     5.10     5.74
## 5 High          Hard         5.12 0.156  54.4     4.80     5.43
## 6 Low           Hard         5.24 0.159  53.9     4.92     5.56
# ================================================================================
# PART 3: OVERALL SUMMARY
# ================================================================================

print("\n\n=== ÜBERGREIFENDE ZUSAMMENFASSUNG ===")
## [1] "\n\n=== ÜBERGREIFENDE ZUSAMMENFASSUNG ==="
# Collect significant results across both tasks
print("\n--- Signifikante Between-Subjects Effekte (Synchronicity) ---")
## [1] "\n--- Signifikante Between-Subjects Effekte (Synchronicity) ---"
# Significant between-subjects effects for the math task
# (bs_tests_math is defined earlier in the file)
math_significant_bs <- bs_tests_math %>%
  filter(p.adj < 0.05) %>%
  mutate(Task = "Math")

# Significant between-subjects effects for the hidden-profile task
hp_significant_bs <- bs_tests_hp %>%
  filter(p.adj < 0.05) %>%
  mutate(Task = "HP")

# Combine significant results from both tasks
all_significant_bs <- bind_rows(math_significant_bs, hp_significant_bs)

if (nrow(all_significant_bs) > 0) {
  print("Signifikante Synchronicity Unterschiede:")
  print(all_significant_bs %>% dplyr::select(Task, difficulty, estimate, p.value, p.adj))
} else {
  print("Keine signifikanten Synchronicity Unterschiede gefunden.")
}
## [1] "Signifikante Synchronicity Unterschiede:"
## # A tibble: 2 × 5
##   Task  difficulty            estimate p.value  p.adj
##   <chr> <fct>                    <dbl>   <dbl>  <dbl>
## 1 Math  "Optimal\n(Selected)"    0.468 0.00886 0.0197
## 2 Math  "Hard"                   0.475 0.00984 0.0197
print("\n--- Signifikante Within-Subjects Effekte (Difficulty) ---")
## [1] "\n--- Signifikante Within-Subjects Effekte (Difficulty) ---"
# Significant within-subjects effects for the math task
# (ws_tests_math is defined earlier in the file)
math_significant_ws <- ws_tests_math %>%
  filter(p.adj < 0.05) %>%
  mutate(Task = "Math")

# Significant within-subjects effects for the hidden-profile task
hp_significant_ws <- ws_tests_hp %>%
  filter(p.adj < 0.05) %>%
  mutate(Task = "HP")

# Combine significant results from both tasks
all_significant_ws <- bind_rows(math_significant_ws, hp_significant_ws)

if (nrow(all_significant_ws) > 0) {
  print("Signifikante Difficulty Unterschiede:")
  print(all_significant_ws %>% 
         dplyr::select(Task, synchronicity, contrast, estimate, p.value, p.adj) %>%
         head(10))  # Show only the first 10 given the number of pairwise comparisons
} else {
  print("Keine signifikanten Difficulty Unterschiede gefunden.")
}
## [1] "Signifikante Difficulty Unterschiede:"
## # A tibble: 8 × 6
##   Task  synchronicity contrast                  estimate  p.value    p.adj
##   <chr> <fct>         <fct>                        <dbl>    <dbl>    <dbl>
## 1 Math  High          Easy - Optimal_Selected     -0.254 2.90e- 2 4.98e- 2
## 2 Math  High          Easy - Hard                  0.647 1.22e- 7 2.43e- 7
## 3 Math  High          Optimal_Selected - Hard      0.901 2.36e-13 9.45e-13
## 4 Math  High          Optimal_Calibrated - Hard    0.749 8.14e-10 1.95e- 9
## 5 Math  Low           Easy - Hard                  0.858 2.23e-12 6.68e-12
## 6 Math  Low           Optimal_Selected - Hard      0.908 1.38e-13 8.31e-13
## 7 Math  Low           Optimal_Calibrated - Hard    1.04  6.44e-17 7.73e-16
## 8 HP    High          Easy - Hard                  0.321 6.07e- 3 3.64e- 2
# Interaction plots for estimated marginal means
# ================================================================================

library(ggplot2)
library(emmeans)

print("=== INTERACTION PLOTS FÜR ESTIMATED MARGINAL MEANS ===")
## [1] "=== INTERACTION PLOTS FÜR ESTIMATED MARGINAL MEANS ==="
# ================================================================================
# PART 1: MATH TASK INTERACTION PLOT
# ================================================================================

print("--- MATH TASK INTERACTION PLOT ---")
## [1] "--- MATH TASK INTERACTION PLOT ---"
# Build ggplot-ready data for the math task from the EMM table
# (marginal_means_math is defined earlier in the file)
math_plot_data <- marginal_means_math %>%
  mutate(
    # Relabel communication medium: Low synchronicity = Chat, High = Jitsi video
    communication = case_when(
      synchronicity == "Low" ~ "Chat",
      synchronicity == "High" ~ "Jitsi"
    ),
    # Two-line difficulty labels for nicer axis rendering
    difficulty_label = case_when(
      difficulty == "Easy" ~ "Easy",
      difficulty == "Optimal_Selected" ~ "Optimal\n(Selected)",
      difficulty == "Optimal_Calibrated" ~ "Optimal\n(Calibrated)", 
      difficulty == "Hard" ~ "Hard"
    ),
    # Fix the display order of difficulty levels
    difficulty_ordered = factor(difficulty_label, 
                               levels = c("Easy", "Optimal\n(Selected)", 
                                        "Optimal\n(Calibrated)", "Hard"))
  )
# Math task interaction plot: EMMs per communication medium across
# difficulty levels, with +/- 1 SE error bars.
p_math <- ggplot(math_plot_data, aes(x = difficulty_ordered, y = emmean, 
                                    color = communication, group = communication)) +
  # `linewidth` replaces the `size` aesthetic for lines (deprecated in
  # ggplot2 3.4.0 — the old call triggered the lifecycle warning)
  geom_line(linewidth = 1.2, alpha = 0.8) +
  geom_point(size = 3, alpha = 0.9) +
  geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), 
                width = 0.1, alpha = 0.7) +
  scale_color_manual(values = c("Chat" = "#E31A1C", "Jitsi" = "#1F78B4")) +
  labs(
    title = "Math Task: Flow Score by Communication and Difficulty",
    subtitle = "Estimated Marginal Means with Standard Errors",
    x = "Difficulty Level",
    y = "Estimated Flow Score",
    color = "Communication"
  ) +
  theme_minimal() +
  theme(
    plot.title = element_text(size = 14, face = "bold"),
    plot.subtitle = element_text(size = 12),
    axis.title = element_text(size = 12),
    axis.text = element_text(size = 10),
    legend.title = element_text(size = 12),
    legend.text = element_text(size = 11),
    panel.grid.minor = element_blank(),
    panel.grid.major.x = element_blank()
  ) +
  # Expand the y-axis slightly beyond the min/max of the SE range
  scale_y_continuous(limits = c(
    min(math_plot_data$emmean - math_plot_data$SE) * 0.95,
    max(math_plot_data$emmean + math_plot_data$SE) * 1.05
  ))
print(p_math)

# ================================================================================
# PART 2: HIDDEN PROFILE TASK INTERACTION PLOT
# ================================================================================

print("--- HIDDEN PROFILE TASK INTERACTION PLOT ---")
## [1] "--- HIDDEN PROFILE TASK INTERACTION PLOT ---"
# Build ggplot-ready data for the HP task from the EMM table
hp_plot_data <- marginal_means_hp %>%
  mutate(
    # Relabel communication medium: Low synchronicity = Chat, High = Jitsi video
    communication = case_when(
      synchronicity == "Low" ~ "Chat",
      synchronicity == "High" ~ "Jitsi"
    ),
    # Keep the difficulty order as it appears in the EMM table (Easy, Medium, Hard)
    difficulty_ordered = factor(difficulty, levels = unique(difficulty))
  )

# HP task interaction plot: EMMs per communication medium across
# difficulty levels, with +/- 1 SE error bars.
p_hp <- ggplot(hp_plot_data, aes(x = difficulty_ordered, y = emmean, 
                                color = communication, group = communication)) +
  # `linewidth` replaces the deprecated `size` aesthetic for lines (ggplot2 >= 3.4.0)
  geom_line(linewidth = 1.2, alpha = 0.8) +
  geom_point(size = 3, alpha = 0.9) +
  geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), 
                width = 0.1, alpha = 0.7) +
  scale_color_manual(values = c("Chat" = "#E31A1C", "Jitsi" = "#1F78B4")) +
  labs(
    title = "Hidden Profile Task: Flow Score by Communication and Difficulty",
    subtitle = "Estimated Marginal Means with Standard Errors",
    x = "Difficulty Level",
    y = "Estimated Flow Score",
    color = "Communication"
  ) +
  theme_minimal() +
  theme(
    plot.title = element_text(size = 14, face = "bold"),
    plot.subtitle = element_text(size = 12),
    axis.title = element_text(size = 12),
    axis.text = element_text(size = 10),
    legend.title = element_text(size = 12),
    legend.text = element_text(size = 11),
    panel.grid.minor = element_blank(),
    panel.grid.major.x = element_blank()
  ) +
  # Expand the y-axis slightly beyond the min/max of the SE range
  scale_y_continuous(limits = c(
    min(hp_plot_data$emmean - hp_plot_data$SE) * 0.95,
    max(hp_plot_data$emmean + hp_plot_data$SE) * 1.05
  ))

print(p_hp)

# ================================================================================
# PART 3: COMBINED PLOT (BOTH TASKS)
# ================================================================================

print("--- KOMBINIERTER PLOT: BEIDE TASKS ---")
## [1] "--- KOMBINIERTER PLOT: BEIDE TASKS ---"
# Stack both tasks' EMM tables; HP rows get a plain-character difficulty
# label so the two label schemes can coexist in one data frame
combined_plot_data <- bind_rows(
  math_plot_data %>% mutate(task = "Math Task"),
  hp_plot_data %>% 
    mutate(task = "Hidden Profile Task",
           difficulty_label = as.character(difficulty),
           difficulty_ordered = factor(difficulty))
)

# Faceted interaction plot: one panel per task, free x-scales because the
# two tasks use different difficulty levels
p_combined <- ggplot(combined_plot_data, aes(x = difficulty_ordered, y = emmean, 
                                           color = communication, group = communication)) +
  # `linewidth` replaces the deprecated `size` aesthetic for lines (ggplot2 >= 3.4.0)
  geom_line(linewidth = 1.2, alpha = 0.8) +
  geom_point(size = 3, alpha = 0.9) +
  geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), 
                width = 0.1, alpha = 0.7) +
  facet_wrap(~ task, scales = "free_x") +
  scale_color_manual(values = c("Chat" = "#E31A1C", "Jitsi" = "#1F78B4")) +
  labs(
    title = "Flow Score by Communication and Difficulty Across Tasks",
    subtitle = "Estimated Marginal Means with Standard Errors",
    x = "Difficulty Level",
    y = "Estimated Flow Score",
    color = "Communication"
  ) +
  theme_minimal() +
  theme(
    plot.title = element_text(size = 14, face = "bold"),
    plot.subtitle = element_text(size = 12),
    axis.title = element_text(size = 12),
    axis.text = element_text(size = 10),
    axis.text.x = element_text(angle = 45, hjust = 1),
    legend.title = element_text(size = 12),
    legend.text = element_text(size = 11),
    strip.text = element_text(size = 12, face = "bold"),
    panel.grid.minor = element_blank(),
    panel.grid.major.x = element_blank()
  )

print(p_combined)

# ================================================================================
# PART 4: ALTERNATIVE VIEW - POINTS WITH CONFIDENCE INTERVALS
# ================================================================================

print("--- ALTERNATIVE: KONFIDENZINTERVALL-PLOT ---")
## [1] "--- ALTERNATIVE: KONFIDENZINTERVALL-PLOT ---"
# Math task EMMs with the 95% CIs reported by emmeans (lower.CL/upper.CL),
# dodged horizontally so the two communication media do not overlap
p_math_ci <- ggplot(math_plot_data, aes(x = difficulty_ordered, y = emmean, 
                                       color = communication)) +
  geom_pointrange(aes(ymin = lower.CL, ymax = upper.CL), 
                  position = position_dodge(width = 0.3), size = 0.8) +
  geom_point(position = position_dodge(width = 0.3), size = 2.5) +
  scale_color_manual(values = c("Chat" = "#E31A1C", "Jitsi" = "#1F78B4")) +
  labs(
    title = "Math Task: Flow Score with 95% Confidence Intervals",
    x = "Difficulty Level",
    y = "Estimated Flow Score",
    color = "Communication"
  ) +
  theme_minimal() +
  theme(
    plot.title = element_text(size = 14, face = "bold"),
    axis.title = element_text(size = 12),
    axis.text = element_text(size = 10),
    legend.title = element_text(size = 12),
    legend.text = element_text(size = 11),
    panel.grid.minor = element_blank(),
    panel.grid.major.x = element_blank()
  )

print(p_math_ci)

# ================================================================================
# PART 5: INTERPRETATION HELPER
# ================================================================================

print("\n=== INTERPRETATION DER PLOTS ===")
## [1] "\n=== INTERPRETATION DER PLOTS ==="
# Compute Jitsi-minus-Chat EMM differences per difficulty level and attach a
# verbal interpretation (thresholds: |diff| > 0.2 = clearly, > 0.05 = slightly)
print("--- Math Task: Chat vs. Jitsi Unterschiede pro Schwierigkeitsstufe ---")
## [1] "--- Math Task: Chat vs. Jitsi Unterschiede pro Schwierigkeitsstufe ---"
math_differences <- math_plot_data %>%
  dplyr::select(difficulty_ordered, communication, emmean) %>%
  pivot_wider(names_from = communication, values_from = emmean) %>%
  mutate(
    difference = Jitsi - Chat,
    # case_when is first-match: check the wider thresholds before the narrow ones
    interpretation = case_when(
      difference > 0.2 ~ "Jitsi deutlich höher",
      difference > 0.05 ~ "Jitsi leicht höher", 
      difference < -0.2 ~ "Chat deutlich höher",
      difference < -0.05 ~ "Chat leicht höher",
      TRUE ~ "Etwa gleich"
    )
  )

print(math_differences)
## # A tibble: 4 × 5
##   difficulty_ordered      Jitsi  Chat difference interpretation      
##   <fct>                   <dbl> <dbl>      <dbl> <chr>               
## 1 "Easy"                   5.63  5.37      0.264 Jitsi deutlich höher
## 2 "Optimal\n(Selected)"    5.89  5.42      0.468 Jitsi deutlich höher
## 3 "Optimal\n(Calibrated)"  5.74  5.55      0.187 Jitsi leicht höher  
## 4 "Hard"                   4.99  4.51      0.475 Jitsi deutlich höher
print("\n--- Hidden Profile Task: Chat vs. Jitsi Unterschiede pro Schwierigkeitsstufe ---")
## [1] "\n--- Hidden Profile Task: Chat vs. Jitsi Unterschiede pro Schwierigkeitsstufe ---"
# Same Jitsi-minus-Chat comparison for the hidden-profile task
hp_differences <- hp_plot_data %>%
  dplyr::select(difficulty_ordered, communication, emmean) %>%
  pivot_wider(names_from = communication, values_from = emmean) %>%
  mutate(
    difference = Jitsi - Chat,
    # Identical thresholds as for the math task above
    interpretation = case_when(
      difference > 0.2 ~ "Jitsi deutlich höher",
      difference > 0.05 ~ "Jitsi leicht höher",
      difference < -0.2 ~ "Chat deutlich höher", 
      difference < -0.05 ~ "Chat leicht höher",
      TRUE ~ "Etwa gleich"
    )
  )

print(hp_differences)
## # A tibble: 3 × 5
##   difficulty_ordered Jitsi  Chat difference interpretation    
##   <fct>              <dbl> <dbl>      <dbl> <chr>             
## 1 Easy                5.44  5.35     0.0938 Jitsi leicht höher
## 2 Medium              5.36  5.42    -0.0578 Chat leicht höher 
## 3 Hard                5.12  5.24    -0.121  Chat leicht höher
# Save plots to disk (optional; uncomment as needed)
# ggsave("math_task_interaction_plot.png", p_math, width = 10, height = 6, dpi = 300)
# ggsave("hp_task_interaction_plot.png", p_hp, width = 10, height = 6, dpi = 300)
# ggsave("combined_tasks_interaction_plot.png", p_combined, width = 12, height = 6, dpi = 300)

print("\n=== PLOTS ERSTELLT ===")
## [1] "\n=== PLOTS ERSTELLT ==="
print("Verfügbare Plot-Objekte:")
## [1] "Verfügbare Plot-Objekte:"
print("- p_math: Math Task Interaction Plot")
## [1] "- p_math: Math Task Interaction Plot"
print("- p_hp: Hidden Profile Task Interaction Plot") 
## [1] "- p_hp: Hidden Profile Task Interaction Plot"
print("- p_combined: Beide Tasks kombiniert")
## [1] "- p_combined: Beide Tasks kombiniert"
print("- p_math_ci: Math Task mit Konfidenzintervallen")
## [1] "- p_math_ci: Math Task mit Konfidenzintervallen"

Comparison of communication media (NONE/CHAT/JITSI)

# ================================================================================
# PART 1: DATA PREPARATION AND MERGING
# ================================================================================

# Current experiment's data: math task only
current_data <- flow_clean %>%
  filter(task == "Math") %>%
  dplyr::select(participant.code, team_id, difficulty, comm, flow_score, order, fp_total) %>%
  # Rename columns for consistency with the historical data set
  dplyr::rename(
    participant_id = participant.code,
    session_id = team_id
  ) %>%
  # Communication medium as a categorical variable.
  # NOTE(review): case_when has no default branch, so any `comm` value other
  # than "Jitsi"/"Chat" silently becomes NA — confirm those are the only values.
  mutate(
    comm_type = case_when(
      comm == "Jitsi" ~ "Video",
      comm == "Chat" ~ "Chat"
    ),
    data_source = "Current"
  ) %>%
  dplyr::select(-comm)  # Drop the original comm column

print(sprintf("Aktuelle Daten: %d Beobachtungen, %d Teams, %d Teilnehmer",
              nrow(current_data),
              length(unique(current_data$session_id)),
              length(unique(current_data$participant_id))))
## [1] "Aktuelle Daten: 460 Beobachtungen, 40 Teams, 120 Teilnehmer"
# Historical data: keep only the MP (multi-player) treatment
historical_data_prep <- data_old %>%
  filter(Treatment == "MP") %>%  # Multi-player only
  dplyr::select(SessionID, SubjectID, Condition, Order, flowFKS_9, flowProne.General)

# Build per-session order strings for the historical data,
# mirroring the `order` column format used in flow_clean
print("Erstelle Order-Strings für historische Daten...")
## [1] "Erstelle Order-Strings für historische Daten..."
historical_order_mapping <- historical_data_prep %>%
  dplyr::select(SessionID, Condition, Order) %>%
  distinct() %>%
  # Map numeric Condition to the single-letter difficulty codes used elsewhere
  mutate(
    difficulty_code = case_when(
      Condition == 1 ~ "B",  # Easy
      Condition == 2 ~ "A",  # Optimal_Selected
      Condition == 3 ~ "F",  # Optimal_Calibrated  
      Condition == 4 ~ "O"   # Hard
    )
  ) %>%
  # Sort by presentation Order so the codes concatenate in the right sequence
  arrange(SessionID, Order) %>%
  # One order string per team/session, e.g. ['A', 'F', 'B', 'O']
  group_by(SessionID) %>%
  dplyr::summarise(
    order_string = paste0("['", paste(difficulty_code, collapse = "', '"), "']"),
    .groups = "drop"
  )
  # IMPORTANT: SessionID is retained here as the join key

print("Beispiel Order-Mappings:")
## [1] "Beispiel Order-Mappings:"
print(head(historical_order_mapping, 5))
## # A tibble: 5 × 2
##   SessionID     order_string        
##   <chr>         <chr>               
## 1 session_33-MP ['A', 'F', 'B', 'O']
## 2 session_38-MP ['O', 'B', 'F', 'A']
## 3 session_39-MP ['F', 'O', 'A', 'B']
## 4 session_41-MP ['A', 'F', 'B', 'O']
## 5 session_43-MP ['O', 'B', 'F', 'A']
# Attach the order strings to the historical observations
historical_data <- historical_data_prep %>%
  # Join FIRST, while the key column is still named SessionID
  left_join(historical_order_mapping, by = "SessionID") %>%
  # THEN rename to the harmonized column names
  dplyr::rename(
    session_id = SessionID,
    participant_id = SubjectID,
    condition_num = Condition,
    order_position = Order,
    flow_score = flowFKS_9,
    fp_total = flowProne.General
  ) %>%
  # Convert numeric condition codes to difficulty labels
  # (same mapping as in historical_order_mapping above)
  mutate(
    difficulty = case_when(
      condition_num == 1 ~ "Easy",           # B
      condition_num == 2 ~ "Optimal_Selected",  # A
      condition_num == 3 ~ "Optimal_Calibrated", # F
      condition_num == 4 ~ "Hard"            # O
    ),
    comm_type = "None",  # No communication in the old experiment
    data_source = "Historical"
  ) %>%
  # Use the order string as the `order` column (analogous to the current data)
  dplyr::rename(order = order_string) %>%
  dplyr::select(-condition_num, -order_position)

print(sprintf("Historische Daten: %d Beobachtungen, %d Teams, %d Teilnehmer",
              nrow(historical_data),
              length(unique(historical_data$session_id)),
              length(unique(historical_data$participant_id))))
## [1] "Historische Daten: 422 Beobachtungen, 39 Teams, 109 Teilnehmer"
# Combine the current and historical data sets
combined_data <- bind_rows(current_data, historical_data) %>%
  # Set explicit factor levels for modeling (reference levels come first:
  # Easy difficulty, no communication, historical source)
  mutate(
    difficulty = factor(difficulty, levels = c("Easy", "Optimal_Selected", "Optimal_Calibrated", "Hard")),
    comm_type = factor(comm_type, levels = c("None", "Chat", "Video")),
    data_source = factor(data_source, levels = c("Historical", "Current"))
  )

print(sprintf("Kombinierte Daten: %d Beobachtungen gesamt",
              nrow(combined_data)))
## [1] "Kombinierte Daten: 882 Beobachtungen gesamt"
# Overview of the data distribution (comm_type is fully nested in data_source:
# None = historical only, Chat/Video = current only)
print("\n--- Datenverteilung nach Communication Type ---")
## [1] "\n--- Datenverteilung nach Communication Type ---"
print(table(combined_data$comm_type, combined_data$data_source))
##        
##         Historical Current
##   None         422       0
##   Chat           0     233
##   Video          0     227
print("\n--- Datenverteilung nach Difficulty ---")
## [1] "\n--- Datenverteilung nach Difficulty ---"
print(table(combined_data$difficulty, combined_data$comm_type))
##                     
##                      None Chat Video
##   Easy                105   60    56
##   Optimal_Selected    106   60    59
##   Optimal_Calibrated  106   60    59
##   Hard                105   53    53
# ================================================================================
# PART 2: DESCRIPTIVE STATISTICS
# ================================================================================

print("\n=== DESKRIPTIVE STATISTIKEN ===")
## [1] "\n=== DESKRIPTIVE STATISTIKEN ==="
# Basic statistics per communication type (and data source, which is
# redundant with comm_type given the nesting shown above)
descriptive_stats <- combined_data %>%
  group_by(comm_type, data_source) %>%
  dplyr::summarise(
    n_obs = n(),
    n_teams = length(unique(session_id)),
    n_participants = length(unique(participant_id)),
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    mean_fp = mean(fp_total, na.rm = TRUE),
    sd_fp = sd(fp_total, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  mutate(across(where(is.numeric), ~ round(.x, 3)))

print("Deskriptive Statistiken nach Communication Type:")
## [1] "Deskriptive Statistiken nach Communication Type:"
print(descriptive_stats)
## # A tibble: 3 × 9
##   comm_type data_source n_obs n_teams n_participants mean_flow sd_flow mean_fp
##   <fct>     <fct>       <int>   <int>          <int>     <dbl>   <dbl>   <dbl>
## 1 None      Historical    422      39            109      4.63   0.889    3.92
## 2 Chat      Current       233      20             60      5.28   1.02     3.60
## 3 Video     Current       227      20             60      5.55   0.992    3.57
## # ℹ 1 more variable: sd_fp <dbl>
# Detailed flow-score statistics per communication type x difficulty cell
detailed_stats <- combined_data %>%
  group_by(comm_type, difficulty) %>%
  dplyr::summarise(
    n_obs = n(),
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  mutate(across(where(is.numeric), ~ round(.x, 3)))

print("\n--- Flow Scores nach Communication Type und Difficulty ---")
## [1] "\n--- Flow Scores nach Communication Type und Difficulty ---"
print(detailed_stats)
## # A tibble: 12 × 5
##    comm_type difficulty         n_obs mean_flow sd_flow
##    <fct>     <fct>              <int>     <dbl>   <dbl>
##  1 None      Easy                 105      4.77   0.822
##  2 None      Optimal_Selected     106      4.81   0.824
##  3 None      Optimal_Calibrated   106      4.88   0.904
##  4 None      Hard                 105      4.06   0.758
##  5 Chat      Easy                  60      5.41   0.888
##  6 Chat      Optimal_Selected      60      5.46   1.00 
##  7 Chat      Optimal_Calibrated    60      5.59   0.951
##  8 Chat      Hard                  53      4.60   0.969
##  9 Video     Easy                  56      5.63   0.942
## 10 Video     Optimal_Selected      59      5.85   0.930
## 11 Video     Optimal_Calibrated    59      5.70   0.917
## 12 Video     Hard                  53      4.98   0.985
# ================================================================================
# PART 3: REGRESSION MODELS FOR THE HISTORICAL COMPARISON
# ================================================================================

print("\n=== REGRESSIONSANALYSE - HISTORISCHER VERGLEICH ===")
## [1] "\n=== REGRESSIONSANALYSE - HISTORISCHER VERGLEICH ==="
# Model 1: communication type main effect only, with crossed random
# intercepts for session/team and participant
print("\n--- MODELL 1: Communication Type Haupteffekt ---")
## [1] "\n--- MODELL 1: Communication Type Haupteffekt ---"
model_historical_1 <- lmer(
  flow_score ~ comm_type + 
    (1 | session_id) + (1 | participant_id),
  data = combined_data
)
summary(model_historical_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ comm_type + (1 | session_id) + (1 | participant_id)
##    Data: combined_data
## 
## REML criterion at convergence: 2311.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3389 -0.5710  0.0590  0.6201  2.6274 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.30692  0.5540  
##  session_id     (Intercept) 0.00738  0.0859  
##  Residual                   0.60079  0.7751  
## Number of obs: 882, groups:  participant_id, 229; session_id, 79
## 
## Fixed effects:
##                Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)     4.62175    0.06675 78.02362  69.241  < 2e-16 ***
## comm_typeChat   0.65871    0.11195 74.24577   5.884 1.07e-07 ***
## comm_typeVideo  0.91624    0.11245 75.32599   8.148 6.06e-12 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmm_tC
## comm_typCht -0.596       
## comm_typeVd -0.594  0.354
# Model 2: adds difficulty as a second fixed main effect (no interaction yet).
print("\n--- MODELL 2: Communication Type + Difficulty ---")
## [1] "\n--- MODELL 2: Communication Type + Difficulty ---"
model_historical_2 <- lmer(
  flow_score ~ comm_type + difficulty + 
    (1 | session_id) + (1 | participant_id),
  data = combined_data
)
summary(model_historical_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ comm_type + difficulty + (1 | session_id) + (1 |  
##     participant_id)
##    Data: combined_data
## 
## REML criterion at convergence: 2127
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.03276 -0.58578  0.03322  0.57770  2.68303 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.34786  0.5898  
##  session_id     (Intercept) 0.01117  0.1057  
##  Residual                   0.44550  0.6675  
## Number of obs: 882, groups:  participant_id, 229; session_id, 79
## 
## Fixed effects:
##                               Estimate Std. Error        df t value Pr(>|t|)
## (Intercept)                    4.75384    0.07806 137.92907  60.898  < 2e-16
## comm_typeChat                  0.63815    0.11333  74.50500   5.631 3.01e-07
## comm_typeVideo                 0.89557    0.11373  75.40114   7.874 2.01e-11
## difficultyOptimal_Selected     0.08554    0.06336 647.86932   1.350   0.1775
## difficultyOptimal_Calibrated   0.11086    0.06341 649.10031   1.748   0.0809
## difficultyHard                -0.73146    0.06462 651.42090 -11.320  < 2e-16
##                                 
## (Intercept)                  ***
## comm_typeChat                ***
## comm_typeVideo               ***
## difficultyOptimal_Selected      
## difficultyOptimal_Calibrated .  
## difficultyHard               ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmm_tC cmm_tV dffO_S dffO_C
## comm_typCht -0.518                            
## comm_typeVd -0.513  0.354                     
## dffcltyOp_S -0.409  0.001 -0.004              
## dffcltyOp_C -0.409  0.001 -0.005  0.506       
## diffcltyHrd -0.409  0.011  0.005  0.495  0.494
# Model 3: full cross of communication type and difficulty, testing whether the
# communication effect differs across difficulty levels.
print("\n--- MODELL 3: Communication Type × Difficulty Interaktion ---")
## [1] "\n--- MODELL 3: Communication Type × Difficulty Interaktion ---"
model_historical_3 <- lmer(
  flow_score ~ comm_type * difficulty + 
    (1 | session_id) + (1 | participant_id),
  data = combined_data
)
summary(model_historical_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ comm_type * difficulty + (1 | session_id) + (1 |  
##     participant_id)
##    Data: combined_data
## 
## REML criterion at convergence: 2134.2
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.90751 -0.56803  0.03288  0.56909  2.69442 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.34873  0.5905  
##  session_id     (Intercept) 0.01154  0.1074  
##  Residual                   0.44520  0.6672  
## Number of obs: 882, groups:  participant_id, 229; session_id, 79
## 
## Fixed effects:
##                                               Estimate Std. Error         df
## (Intercept)                                   4.774423   0.088277 214.208893
## comm_typeChat                                 0.634836   0.146977 199.313992
## comm_typeVideo                                0.818021   0.149314 209.436471
## difficultyOptimal_Selected                    0.016517   0.092159 642.709495
## difficultyOptimal_Calibrated                  0.083417   0.092159 642.709510
## difficultyHard                               -0.717062   0.092446 643.972476
## comm_typeChat:difficultyOptimal_Selected      0.033483   0.152752 640.373648
## comm_typeVideo:difficultyOptimal_Selected     0.231366   0.155222 642.803433
## comm_typeChat:difficultyOptimal_Calibrated    0.096212   0.152752 640.373654
## comm_typeVideo:difficultyOptimal_Calibrated   0.008273   0.155525 645.839108
## comm_typeChat:difficultyHard                 -0.130638   0.156836 645.508566
## comm_typeVideo:difficultyHard                 0.071374   0.158576 645.860719
##                                             t value Pr(>|t|)    
## (Intercept)                                  54.084  < 2e-16 ***
## comm_typeChat                                 4.319 2.47e-05 ***
## comm_typeVideo                                5.479 1.22e-07 ***
## difficultyOptimal_Selected                    0.179    0.858    
## difficultyOptimal_Calibrated                  0.905    0.366    
## difficultyHard                               -7.757 3.43e-14 ***
## comm_typeChat:difficultyOptimal_Selected      0.219    0.827    
## comm_typeVideo:difficultyOptimal_Selected     1.491    0.137    
## comm_typeChat:difficultyOptimal_Calibrated    0.630    0.529    
## comm_typeVideo:difficultyOptimal_Calibrated   0.053    0.958    
## comm_typeChat:difficultyHard                 -0.833    0.405    
## comm_typeVideo:difficultyHard                 0.450    0.653    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmm_tC cmm_tV dffO_S dffO_C dffclH c_C:O_S c_V:O_S c_C:O_C
## comm_typCht -0.601                                                           
## comm_typeVd -0.591  0.355                                                    
## dffcltyOp_S -0.525  0.315  0.310                                             
## dffcltyOp_C -0.525  0.315  0.310  0.504                                      
## diffcltyHrd -0.524  0.315  0.310  0.500  0.500                               
## cmm_tyC:O_S  0.317 -0.521 -0.187 -0.603 -0.304 -0.302                        
## cmm_tyV:O_S  0.312 -0.187 -0.532 -0.594 -0.299 -0.297  0.358                 
## cmm_tyC:O_C  0.317 -0.521 -0.187 -0.304 -0.603 -0.302  0.502   0.181         
## cmm_tyV:O_C  0.311 -0.187 -0.533 -0.299 -0.593 -0.296  0.180   0.511   0.358 
## cmm_typCh:H  0.309 -0.507 -0.183 -0.295 -0.295 -0.589  0.488   0.175   0.488 
## cmm_typVd:H  0.306 -0.184 -0.519 -0.291 -0.291 -0.583  0.176   0.499   0.176 
##             c_V:O_C cm_C:H
## comm_typCht               
## comm_typeVd               
## dffcltyOp_S               
## dffcltyOp_C               
## diffcltyHrd               
## cmm_tyC:O_S               
## cmm_tyV:O_S               
## cmm_tyC:O_C               
## cmm_tyV:O_C               
## cmm_typCh:H  0.175        
## cmm_typVd:H  0.498   0.344
# Model 4: full model, adding trait flow proneness (fp_total) as a
# person-level covariate on top of the interaction model.
print("\n--- MODELL 4: Vollständiges Modell mit Covariaten ---")
## [1] "\n--- MODELL 4: Vollständiges Modell mit Covariaten ---"
model_historical_4 <- lmer(
  flow_score ~ comm_type * difficulty + fp_total +
    (1 | session_id) + (1 | participant_id),
  data = combined_data
)
summary(model_historical_4)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ comm_type * difficulty + fp_total + (1 | session_id) +  
##     (1 | participant_id)
##    Data: combined_data
## 
## REML criterion at convergence: 2116.7
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.86124 -0.56787  0.01114  0.56651  2.73881 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.29826  0.5461  
##  session_id     (Intercept) 0.02651  0.1628  
##  Residual                   0.44487  0.6670  
## Number of obs: 882, groups:  participant_id, 229; session_id, 79
## 
## Fixed effects:
##                                              Estimate Std. Error        df
## (Intercept)                                   2.51590    0.50237 226.59719
## comm_typeChat                                 0.82031    0.15194 211.85151
## comm_typeVideo                                1.02107    0.15512 223.29240
## difficultyOptimal_Selected                    0.01645    0.09211 643.52426
## difficultyOptimal_Calibrated                  0.08377    0.09211 643.52610
## difficultyHard                               -0.71642    0.09240 644.80237
## fp_total                                      0.57661    0.12626 221.13548
## comm_typeChat:difficultyOptimal_Selected      0.03355    0.15269 641.04962
## comm_typeVideo:difficultyOptimal_Selected     0.23463    0.15515 643.58293
## comm_typeChat:difficultyOptimal_Calibrated    0.09586    0.15269 641.05029
## comm_typeVideo:difficultyOptimal_Calibrated   0.01394    0.15545 646.60078
## comm_typeChat:difficultyHard                 -0.13810    0.15676 646.40021
## comm_typeVideo:difficultyHard                 0.07154    0.15848 646.95651
##                                             t value Pr(>|t|)    
## (Intercept)                                   5.008 1.11e-06 ***
## comm_typeChat                                 5.399 1.79e-07 ***
## comm_typeVideo                                6.582 3.26e-10 ***
## difficultyOptimal_Selected                    0.179    0.858    
## difficultyOptimal_Calibrated                  0.909    0.363    
## difficultyHard                               -7.754 3.49e-14 ***
## fp_total                                      4.567 8.22e-06 ***
## comm_typeChat:difficultyOptimal_Selected      0.220    0.826    
## comm_typeVideo:difficultyOptimal_Selected     1.512    0.131    
## comm_typeChat:difficultyOptimal_Calibrated    0.628    0.530    
## comm_typeVideo:difficultyOptimal_Calibrated   0.090    0.929    
## comm_typeChat:difficultyHard                 -0.881    0.379    
## comm_typeVideo:difficultyHard                 0.451    0.652    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation matrix not shown by default, as p = 13 > 12.
## Use print(value, correlation=TRUE)  or
##     vcov(value)        if you need it
# Model comparison via information criteria (lower = better).
# NOTE(review): all four models were fit with REML (the lmer default); comparing
# models that differ in their fixed effects via AIC/BIC is strictly only valid
# with ML fits (REML = FALSE) — consider refitting before interpreting.
print("\n--- MODELLVERGLEICH ---")
## [1] "\n--- MODELLVERGLEICH ---"
historical_aic <- AIC(model_historical_1, model_historical_2, model_historical_3, model_historical_4)
historical_bic <- BIC(model_historical_1, model_historical_2, model_historical_3, model_historical_4)

print("AIC Vergleich:")
## [1] "AIC Vergleich:"
print(historical_aic)
##                    df      AIC
## model_historical_1  6 2323.894
## model_historical_2  9 2144.994
## model_historical_3 15 2164.189
## model_historical_4 16 2148.674
print("BIC Vergleich:")
## [1] "BIC Vergleich:"
print(historical_bic)
##                    df      BIC
## model_historical_1  6 2352.588
## model_historical_2  9 2188.034
## model_historical_3 15 2235.922
## model_historical_4 16 2225.189
# ================================================================================
# PART 4: EMMEANS POST-HOC TESTS
# ================================================================================

print("\n=== EMMEANS POST-HOC ANALYSEN - HISTORISCHER VERGLEICH ===")
## [1] "\n=== EMMEANS POST-HOC ANALYSEN - HISTORISCHER VERGLEICH ==="
# Use the best model for the post-hoc tests (adjustable)
best_historical_model <- model_historical_4

# Tukey-adjusted pairwise comparisons of the communication types. Since the
# model contains a comm_type x difficulty interaction, these marginal contrasts
# average over difficulty — hence the emmeans NOTE below.
print("\n--- Communication Type Paarvergleiche ---")
## [1] "\n--- Communication Type Paarvergleiche ---"
comm_comparisons <- emmeans(best_historical_model, specs = pairwise ~ comm_type, adjust = "tukey")$contrasts %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
## NOTE: Results may be misleading due to involvement in interactions
print("Communication Type Vergleiche:")
## [1] "Communication Type Vergleiche:"
print(comm_comparisons)
## # A tibble: 3 × 6
##   contrast     estimate    SE    df t.ratio  p.value
##   <chr>           <dbl> <dbl> <dbl>   <dbl>    <dbl>
## 1 None - Chat    -0.818 0.120  85.6   -6.82 3.92e- 9
## 2 None - Video   -1.10  0.122  89.5   -9.03 4.17e-10
## 3 Chat - Video   -0.283 0.129  72.1   -2.20 7.82e- 2
# Communication-type contrasts computed separately within each difficulty level
# (conditioning on difficulty avoids the interaction caveat above).
print("\n--- Communication Type Vergleiche pro Difficulty ---")
## [1] "\n--- Communication Type Vergleiche pro Difficulty ---"
comm_by_difficulty <- emmeans(best_historical_model, specs = pairwise ~ comm_type|difficulty, adjust = "tukey")$contrasts %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))

print("Communication Type Vergleiche nach Difficulty:")
## [1] "Communication Type Vergleiche nach Difficulty:"
print(comm_by_difficulty)
## # A tibble: 12 × 7
##    contrast     difficulty         estimate    SE    df t.ratio  p.value
##    <fct>        <fct>                 <dbl> <dbl> <dbl>   <dbl>    <dbl>
##  1 None - Chat  Easy                 -0.820 0.152  211.  -5.39  5.50e- 7
##  2 None - Video Easy                 -1.02  0.155  222.  -6.58  1.02e- 9
##  3 Chat - Video Easy                 -0.201 0.168  196.  -1.20  4.56e- 1
##  4 None - Chat  Optimal_Selected     -0.854 0.152  210.  -5.62  1.81e- 7
##  5 None - Video Optimal_Selected     -1.26  0.154  215.  -8.16  9.56e-14
##  6 Chat - Video Optimal_Selected     -0.402 0.166  191.  -2.42  4.35e- 2
##  7 None - Chat  Optimal_Calibrated   -0.916 0.152  210.  -6.03  2.18e- 8
##  8 None - Video Optimal_Calibrated   -1.04  0.154  216.  -6.72  4.68e-10
##  9 Chat - Video Optimal_Calibrated   -0.119 0.166  191.  -0.715 7.55e- 1
## 10 None - Chat  Hard                 -0.682 0.156  227.  -4.39  5.21e- 5
## 11 None - Video Hard                 -1.09  0.157  233.  -6.96  1.03e-10
## 12 Chat - Video Hard                 -0.410 0.173  218.  -2.37  4.83e- 2
# Estimated marginal means for every communication type x difficulty cell,
# for descriptive interpretation of the interaction model.
print("\n--- Marginal Means: Communication Type × Difficulty ---")
## [1] "\n--- Marginal Means: Communication Type × Difficulty ---"
historical_marginal_means <- emmeans(best_historical_model, ~ comm_type * difficulty) %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
print(historical_marginal_means)
## # A tibble: 12 × 7
##    comm_type difficulty         emmean     SE    df lower.CL upper.CL
##    <fct>     <fct>               <dbl>  <dbl> <dbl>    <dbl>    <dbl>
##  1 None      Easy                 4.68 0.0906  224.     4.50     4.85
##  2 Chat      Easy                 5.50 0.119   194.     5.26     5.73
##  3 Video     Easy                 5.70 0.122   210.     5.46     5.94
##  4 None      Optimal_Selected     4.69 0.0903  222.     4.51     4.87
##  5 Chat      Optimal_Selected     5.55 0.119   194.     5.31     5.78
##  6 Video     Optimal_Selected     5.95 0.120   200.     5.71     6.19
##  7 None      Optimal_Calibrated   4.76 0.0903  222.     4.58     4.94
##  8 Chat      Optimal_Calibrated   5.68 0.119   194.     5.44     5.91
##  9 Video     Optimal_Calibrated   5.79 0.120   200.     5.56     6.03
## 10 None      Hard                 3.96 0.0905  224.     3.78     4.14
## 11 Chat      Hard                 4.64 0.123   221.     4.40     4.88
## 12 Video     Hard                 5.05 0.124   225.     4.81     5.30
# ================================================================================
# PART 5: SPECIFIC COMPARISONS
# ================================================================================

print("\n=== SPEZIFISCHE HISTORISCHE VERGLEICHE ===")
## [1] "\n=== SPEZIFISCHE HISTORISCHE VERGLEICHE ==="
# 1. None vs. Current Communication (Chat + Video combined)
print("\n--- None vs. Current Communication (kombiniert) ---")
## [1] "\n--- None vs. Current Communication (kombiniert) ---"
# Collapse the three communication types into a two-level "era" grouping:
# historical sessions had no communication channel, current ones had chat/video.
combined_data_grouped <- combined_data %>%
  mutate(
    comm_era = case_when(
      comm_type %in% c("Chat", "Video") ~ "Current_WithComm",
      comm_type == "None" ~ "Historical_NoComm"
    )
  )

# Same fixed-effect structure as the best historical model, but with the
# two-level era factor in place of the three-level communication type.
model_era_comparison <- lmer(
  flow_score ~ comm_era * difficulty + fp_total +
    (1 | session_id) + (1 | participant_id),
  data = combined_data_grouped
)

# Only a single pairwise era contrast exists, so no multiplicity adjustment.
era_emm <- emmeans(model_era_comparison, specs = pairwise ~ comm_era, adjust = "none")
era_comparisons <- era_emm$contrasts %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), \(v) round(v, 4)))
## NOTE: Results may be misleading due to involvement in interactions
# NOTE(review): this contrast averages over difficulty and fp_total — see the
# emmeans NOTE emitted above about involvement in interactions.
print("Historical vs. Current Era Vergleich:")
## [1] "Historical vs. Current Era Vergleich:"
print(era_comparisons)
## # A tibble: 1 × 6
##   contrast                             estimate    SE    df t.ratio  p.value
##   <chr>                                   <dbl> <dbl> <dbl>   <dbl>    <dbl>
## 1 Current_WithComm - Historical_NoComm    0.957 0.105  95.8    9.14 1.04e-14
# 2. Specific Focus: None vs Video, None vs Chat
# (the Tukey-adjusted contrasts computed above are filtered just below)
print("\n--- Spezifische Paarvergleiche: None vs Chat vs Video ---")
## [1] "\n--- Spezifische Paarvergleiche: None vs Chat vs Video ---"
# Keep only the contrasts that pit None against a communication medium;
# a contrast qualifies when it mentions None together with Chat or Video.
specific_comparisons <- comm_comparisons %>%
  filter(grepl("None", contrast) & grepl("Chat|Video", contrast))

if (nrow(specific_comparisons) > 0) {
  print("Wichtige Vergleiche (None vs. Kommunikationsmedien):")
  print(specific_comparisons)
}
## [1] "Wichtige Vergleiche (None vs. Kommunikationsmedien):"
## # A tibble: 2 × 6
##   contrast     estimate    SE    df t.ratio  p.value
##   <chr>           <dbl> <dbl> <dbl>   <dbl>    <dbl>
## 1 None - Chat    -0.818 0.120  85.6   -6.82 3.92e- 9
## 2 None - Video   -1.10  0.122  89.5   -9.03 4.17e-10
# ================================================================================
# PART 6: SUMMARY AND INTERPRETATION
# ================================================================================

print("\n=== ZUSAMMENFASSUNG HISTORISCHER VERGLEICH ===")
## [1] "\n=== ZUSAMMENFASSUNG HISTORISCHER VERGLEICH ==="
# Significant communication-type contrasts at the conventional 5% level.
significant_comm_effects <- filter(comm_comparisons, p.value < 0.05)

if (nrow(significant_comm_effects) == 0) {
  print("Keine signifikanten Communication Type Unterschiede gefunden.")
} else {
  print("Signifikante Communication Type Unterschiede:")
  print(dplyr::select(significant_comm_effects, contrast, estimate, p.value))
}
## [1] "Signifikante Communication Type Unterschiede:"
## # A tibble: 2 × 3
##   contrast     estimate  p.value
##   <chr>           <dbl>    <dbl>
## 1 None - Chat    -0.818 3.92e- 9
## 2 None - Video   -1.10  4.17e-10
# Effect sizes for interpretation (Cohen's d approximation)
print("\n--- Effect Sizes (Cohen's d approximation) ---")
## [1] "\n--- Effect Sizes (Cohen's d approximation) ---"
# NOTE(review): the overall SD of flow_score serves as the standardizer here,
# not a within-cell pooled SD — a rough approximation of Cohen's d.
pooled_sd <- sd(combined_data$flow_score, na.rm = TRUE)

effect_sizes <- comm_comparisons %>%
  mutate(cohens_d = abs(estimate) / pooled_sd) %>%
  mutate(
    # Conventional magnitude labels (Cohen, 1988 thresholds).
    effect_magnitude = case_when(
      cohens_d < 0.2 ~ "Negligible",
      cohens_d < 0.5 ~ "Small",
      cohens_d < 0.8 ~ "Medium",
      TRUE ~ "Large"
    )
  ) %>%
  dplyr::select(contrast, estimate, cohens_d, effect_magnitude)

# Display the standardized contrast sizes computed above.
print("Effect Sizes für Communication Type Vergleiche:")
## [1] "Effect Sizes für Communication Type Vergleiche:"
print(effect_sizes)
## # A tibble: 3 × 4
##   contrast     estimate cohens_d effect_magnitude
##   <chr>           <dbl>    <dbl> <chr>           
## 1 None - Chat    -0.818    0.792 Medium          
## 2 None - Video   -1.10     1.07  Large           
## 3 Chat - Video   -0.283    0.274 Small

Comparison to the single-player treatment

# Full historical comparison: single player vs multi player vs current (chat/video)
# Four-way comparison: alone vs together (no communication) vs chat vs video

# NOTE: these packages are already attached above; re-loading is a harmless no-op.
library(dplyr)
library(lme4)
library(emmeans)
library(tibble)

print("=== VOLLSTÄNDIGER HISTORISCHER VERGLEICH ===")
## [1] "=== VOLLSTÄNDIGER HISTORISCHER VERGLEICH ==="
print("Single Player vs Multi Player vs Current Communication")
## [1] "Single Player vs Multi Player vs Current Communication"
# ================================================================================
# TEIL 1: ERWEITERTE DATENAUFBEREITUNG
# ================================================================================

# Current study data: Math task only, relabelled to align with the historical
# coding scheme (already prepared earlier; repeated here for completeness).
current_data_extended <- flow_clean %>%
  filter(task == "Math") %>%
  dplyr::select(participant.code, team_id, difficulty, comm, flow_score, order, fp_total) %>%
  dplyr::rename(
    participant_id = participant.code,
    session_id = team_id
  ) %>%
  mutate(
    # Map the current study's channel labels onto the four-way condition names.
    communication_condition = case_when(
      comm == "Jitsi" ~ "Together_Video",
      comm == "Chat" ~ "Together_Chat"
    ),
    data_source = "Current"
  ) %>%
  dplyr::select(-comm)

print(sprintf("Aktuelle Daten: %d Beobachtungen, %d Teams, %d Teilnehmer",
              nrow(current_data_extended),
              dplyr::n_distinct(current_data_extended$session_id),
              dplyr::n_distinct(current_data_extended$participant_id)))
## [1] "Aktuelle Daten: 460 Beobachtungen, 40 Teams, 120 Teilnehmer"
# Historical data: keep BOTH treatments (SP = single player, MP = multi player).
historical_data_prep_all <- data_old %>%
  dplyr::select(SessionID, SubjectID, Treatment, Condition, Order, flowFKS_9, flowProne.General)

# Build the per-session order string for ALL historical teams.
historical_order_mapping_all <- historical_data_prep_all %>%
  dplyr::select(SessionID, Condition, Order) %>%
  distinct() %>%
  mutate(
    # Translate numeric condition codes into the letter codes of the current study.
    difficulty_code = case_when(
      Condition == 1 ~ "B",  # Easy
      Condition == 2 ~ "A",  # Optimal_Selected
      Condition == 3 ~ "F",  # Optimal_Calibrated
      Condition == 4 ~ "O"   # Hard
    )
  ) %>%
  arrange(SessionID, Order) %>%
  group_by(SessionID) %>%
  dplyr::summarise(
    # Python-list-style string, e.g. "['B', 'A', 'F', 'O']".
    order_string = sprintf("['%s']", paste(difficulty_code, collapse = "', '")),
    .groups = "drop"
  )

# Historical observations (both treatments), renamed and recoded so they can be
# stacked with the current data.
historical_data_extended <- historical_data_prep_all %>%
  dplyr::rename(
    session_id = SessionID,
    participant_id = SubjectID,
    treatment = Treatment,
    condition_num = Condition,
    order_position = Order,
    flow_score = flowFKS_9,
    fp_total = flowProne.General
  ) %>%
  left_join(historical_order_mapping_all, by = c("session_id" = "SessionID")) %>%
  mutate(
    # Numeric condition codes -> descriptive difficulty labels.
    difficulty = case_when(
      condition_num == 1 ~ "Easy",
      condition_num == 2 ~ "Optimal_Selected", 
      condition_num == 3 ~ "Optimal_Calibrated",
      condition_num == 4 ~ "Hard"
    ),
    # Treatment codes -> four-way condition names.
    communication_condition = case_when(
      treatment == "SP" ~ "Alone",
      treatment == "MP" ~ "Together_None"
    ),
    data_source = "Historical"
  ) %>%
  dplyr::rename(order = order_string) %>%
  dplyr::select(-condition_num, -order_position, -treatment)

print(sprintf("Historische Daten: %d Beobachtungen, %d Teams, %d Teilnehmer",
              nrow(historical_data_extended),
              dplyr::n_distinct(historical_data_extended$session_id),
              dplyr::n_distinct(historical_data_extended$participant_id)))
## [1] "Historische Daten: 570 Beobachtungen, 77 Teams, 147 Teilnehmer"
# Stack both sources and fix factor level ordering (reference levels first)
# so model coefficients and plots use a consistent ordering.
comprehensive_data <- bind_rows(current_data_extended, historical_data_extended) %>%
  mutate(
    difficulty = factor(difficulty, levels = c("Easy", "Optimal_Selected", "Optimal_Calibrated", "Hard")),
    communication_condition = factor(
      communication_condition,
      levels = c("Alone", "Together_None", "Together_Chat", "Together_Video")
    ),
    data_source = factor(data_source, levels = c("Historical", "Current"))
  )

print(sprintf("Kombinierte Daten: %d Beobachtungen gesamt", nrow(comprehensive_data)))
## [1] "Kombinierte Daten: 1030 Beobachtungen gesamt"
# ================================================================================
# PART 2: COMPREHENSIVE DESCRIPTIVE STATISTICS
# ================================================================================

print("\n=== UMFASSENDE DESKRIPTIVE STATISTIKEN ===")
## [1] "\n=== UMFASSENDE DESKRIPTIVE STATISTIKEN ==="
# Summary statistics per communication condition x data source cell.
comprehensive_stats <- comprehensive_data %>%
  group_by(communication_condition, data_source) %>%
  dplyr::summarise(
    n_obs = dplyr::n(),
    n_sessions = dplyr::n_distinct(session_id),
    n_participants = dplyr::n_distinct(participant_id),
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    median_flow = median(flow_score, na.rm = TRUE),
    mean_fp = mean(fp_total, na.rm = TRUE),
    sd_fp = sd(fp_total, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  mutate(across(where(is.numeric), \(v) round(v, 3)))

# Show the per-cell summary (communication condition x data source).
print("Umfassende Statistiken nach Communication Condition:")
## [1] "Umfassende Statistiken nach Communication Condition:"
print(comprehensive_stats)
## # A tibble: 4 × 10
##   communication_condition data_source n_obs n_sessions n_participants mean_flow
##   <fct>                   <fct>       <int>      <int>          <int>     <dbl>
## 1 Alone                   Historical    148         38             38      4.76
## 2 Together_None           Historical    422         39            109      4.63
## 3 Together_Chat           Current       233         20             60      5.28
## 4 Together_Video          Current       227         20             60      5.55
## # ℹ 4 more variables: sd_flow <dbl>, median_flow <dbl>, mean_fp <dbl>,
## #   sd_fp <dbl>
# Simplified overview, pooled over data_source, with normal-approximation
# 95% confidence intervals (mean +/- 1.96 * SE).
simple_stats <- comprehensive_data %>%
  group_by(communication_condition) %>%
  dplyr::summarise(
    n_total = dplyr::n(),
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    ci_lower = mean_flow - 1.96 * sd_flow / sqrt(n_total),
    ci_upper = mean_flow + 1.96 * sd_flow / sqrt(n_total),
    .groups = "drop"
  ) %>%
  mutate(across(where(is.numeric), \(v) round(v, 3)))

# Show the condition-level overview with approximate 95% CIs.
print("\nVereinfachte Übersicht (alle Conditions):")
## [1] "\nVereinfachte Übersicht (alle Conditions):"
print(simple_stats)
## # A tibble: 4 × 6
##   communication_condition n_total mean_flow sd_flow ci_lower ci_upper
##   <fct>                     <int>     <dbl>   <dbl>    <dbl>    <dbl>
## 1 Alone                       148      4.76   1.10      4.58     4.93
## 2 Together_None               422      4.63   0.889     4.55     4.72
## 3 Together_Chat               233      5.28   1.02      5.15     5.41
## 4 Together_Video              227      5.55   0.992     5.43     5.68
# Check how observations are distributed over the design cells.
# NOTE(review): the second table shows communication condition is fully
# confounded with data source (Alone/Together_None only historical,
# Chat/Video only current) — era effects cannot be separated from condition
# effects in the models below.
print("\n--- Datenverteilung ---")
## [1] "\n--- Datenverteilung ---"
print("Nach Communication Condition:")
## [1] "Nach Communication Condition:"
print(table(comprehensive_data$communication_condition))
## 
##          Alone  Together_None  Together_Chat Together_Video 
##            148            422            233            227
print("\nNach Communication Condition und Data Source:")
## [1] "\nNach Communication Condition und Data Source:"
print(table(comprehensive_data$communication_condition, comprehensive_data$data_source))
##                 
##                  Historical Current
##   Alone                 148       0
##   Together_None         422       0
##   Together_Chat           0     233
##   Together_Video          0     227
print("\nNach Difficulty und Communication Condition:")
## [1] "\nNach Difficulty und Communication Condition:"
difficulty_table <- table(comprehensive_data$difficulty, comprehensive_data$communication_condition)
print(difficulty_table)
##                     
##                      Alone Together_None Together_Chat Together_Video
##   Easy                  35           105            60             56
##   Optimal_Selected      38           106            60             59
##   Optimal_Calibrated    37           106            60             59
##   Hard                  38           105            53             53
# ================================================================================
# PART 3: COMPREHENSIVE REGRESSION ANALYSIS
# ================================================================================

print("\n=== UMFASSENDE REGRESSIONSANALYSE ===")
## [1] "\n=== UMFASSENDE REGRESSIONSANALYSE ==="
# Model 1: four-level communication condition main effect only, with crossed
# random intercepts for session and participant.
print("\n--- MODELL 1: Communication Condition Haupteffekt ---")
## [1] "\n--- MODELL 1: Communication Condition Haupteffekt ---"
model_comprehensive_1 <- lmer(
  flow_score ~ communication_condition + 
    (1 | session_id) + (1 | participant_id),
  data = comprehensive_data
)
summary(model_comprehensive_1)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ communication_condition + (1 | session_id) + (1 |  
##     participant_id)
##    Data: comprehensive_data
## 
## REML criterion at convergence: 2773.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -3.15975 -0.57294  0.08166  0.64901  2.50183 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.287782 0.53645 
##  session_id     (Intercept) 0.007159 0.08461 
##  Residual                   0.662959 0.81422 
## Number of obs: 1030, groups:  participant_id, 267; session_id, 117
## 
## Fixed effects:
##                                       Estimate Std. Error       df t value
## (Intercept)                             4.7531     0.1107 253.3291  42.925
## communication_conditionTogether_None   -0.1306     0.1292 217.6379  -1.011
## communication_conditionTogether_Chat    0.5276     0.1424 182.7429   3.705
## communication_conditionTogether_Video   0.7863     0.1428 184.1700   5.506
##                                       Pr(>|t|)    
## (Intercept)                            < 2e-16 ***
## communication_conditionTogether_None   0.31295    
## communication_conditionTogether_Chat   0.00028 ***
## communication_conditionTogether_Video 1.22e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cm_T_N cm_T_C
## cmmnctn_T_N -0.857              
## cmmnctn_T_C -0.778  0.667       
## cmmnctn_T_V -0.775  0.665  0.603
# Model 2: adds difficulty as a fixed main effect alongside the four-level
# communication condition.
print("\n--- MODELL 2: Communication Condition + Difficulty ---")
## [1] "\n--- MODELL 2: Communication Condition + Difficulty ---"
model_comprehensive_2 <- lmer(
  flow_score ~ communication_condition + difficulty + 
    (1 | session_id) + (1 | participant_id),
  data = comprehensive_data
)
summary(model_comprehensive_2)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## flow_score ~ communication_condition + difficulty + (1 | session_id) +  
##     (1 | participant_id)
##    Data: comprehensive_data
## 
## REML criterion at convergence: 2538.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.90958 -0.58012  0.04929  0.61656  2.51525 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.33556  0.5793  
##  session_id     (Intercept) 0.01095  0.1046  
##  Residual                   0.48028  0.6930  
## Number of obs: 1030, groups:  participant_id, 267; session_id, 117
## 
## Fixed effects:
##                                        Estimate Std. Error        df t value
## (Intercept)                             4.86440    0.11772 317.51290  41.320
## communication_conditionTogether_None   -0.13730    0.13003 217.10223  -1.056
## communication_conditionTogether_Chat    0.49938    0.14352 181.92193   3.479
## communication_conditionTogether_Video   0.75707    0.14385 183.13293   5.263
## difficultyOptimal_Selected              0.11252    0.06101 758.77369   1.844
## difficultyOptimal_Calibrated            0.20395    0.06112 760.37008   3.337
## difficultyHard                         -0.74336    0.06202 762.47146 -11.985
##                                       Pr(>|t|)    
## (Intercept)                            < 2e-16 ***
## communication_conditionTogether_None  0.292165    
## communication_conditionTogether_Chat  0.000629 ***
## communication_conditionTogether_Video 3.94e-07 ***
## difficultyOptimal_Selected            0.065514 .  
## difficultyOptimal_Calibrated          0.000888 ***
## difficultyHard                         < 2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cm_T_N cm_T_C cm_T_V dffO_S dffO_C
## cmmnctn_T_N -0.811                                   
## cmmnctn_T_C -0.736  0.664                            
## cmmnctn_T_V -0.732  0.662  0.600                     
## dffcltyOp_S -0.268  0.005  0.005  0.002              
## dffcltyOp_C -0.265  0.003  0.004 -0.001  0.508       
## diffcltyHrd -0.267  0.005  0.013  0.009  0.499  0.498
# Model 3: adds the communication condition x difficulty interaction.
# `*` expands to both main effects plus all interaction terms.
print("\n--- MODELL 3: Communication Condition × Difficulty ---")
## [1] "\n--- MODELL 3: Communication Condition × Difficulty ---"
# Same random-intercept structure as model 2.
model_comprehensive_3 <- lmer(
  flow_score ~ communication_condition * difficulty + 
    (1 | session_id) + (1 | participant_id),
  data = comprehensive_data
)
summary(model_comprehensive_3)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: 
## flow_score ~ communication_condition * difficulty + (1 | session_id) +  
##     (1 | participant_id)
##    Data: comprehensive_data
## 
## REML criterion at convergence: 2530.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.90240 -0.57217  0.04536  0.58178  2.62642 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.3399   0.5830  
##  session_id     (Intercept) 0.0113   0.1063  
##  Residual                   0.4685   0.6845  
## Number of obs: 1030, groups:  participant_id, 267; session_id, 117
## 
## Fixed effects:
##                                                                     Estimate
## (Intercept)                                                          4.68707
## communication_conditionTogether_None                                 0.08730
## communication_conditionTogether_Chat                                 0.72219
## communication_conditionTogether_Video                                0.90653
## difficultyOptimal_Selected                                           0.28370
## difficultyOptimal_Calibrated                                         0.77790
## difficultyHard                                                      -0.78356
## communication_conditionTogether_None:difficultyOptimal_Selected     -0.26664
## communication_conditionTogether_Chat:difficultyOptimal_Selected     -0.23370
## communication_conditionTogether_Video:difficultyOptimal_Selected    -0.03642
## communication_conditionTogether_None:difficultyOptimal_Calibrated   -0.69380
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated   -0.59827
## communication_conditionTogether_Video:difficultyOptimal_Calibrated  -0.68705
## communication_conditionTogether_None:difficultyHard                  0.06658
## communication_conditionTogether_Chat:difficultyHard                 -0.06335
## communication_conditionTogether_Video:difficultyHard                 0.13790
##                                                                    Std. Error
## (Intercept)                                                           0.15124
## communication_conditionTogether_None                                  0.17551
## communication_conditionTogether_Chat                                  0.19213
## communication_conditionTogether_Video                                 0.19400
## difficultyOptimal_Selected                                            0.16112
## difficultyOptimal_Calibrated                                          0.16244
## difficultyHard                                                        0.16112
## communication_conditionTogether_None:difficultyOptimal_Selected       0.18681
## communication_conditionTogether_Chat:difficultyOptimal_Selected       0.20391
## communication_conditionTogether_Video:difficultyOptimal_Selected      0.20585
## communication_conditionTogether_None:difficultyOptimal_Calibrated     0.18795
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated     0.20495
## communication_conditionTogether_Video:difficultyOptimal_Calibrated    0.20712
## communication_conditionTogether_None:difficultyHard                   0.18695
## communication_conditionTogether_Chat:difficultyHard                   0.20699
## communication_conditionTogether_Video:difficultyHard                  0.20838
##                                                                           df
## (Intercept)                                                        696.59468
## communication_conditionTogether_None                               600.66816
## communication_conditionTogether_Chat                               505.66137
## communication_conditionTogether_Video                              516.87035
## difficultyOptimal_Selected                                         751.88856
## difficultyOptimal_Calibrated                                       754.10185
## difficultyHard                                                     751.88856
## communication_conditionTogether_None:difficultyOptimal_Selected    751.50526
## communication_conditionTogether_Chat:difficultyOptimal_Selected    749.65229
## communication_conditionTogether_Video:difficultyOptimal_Selected   751.37977
## communication_conditionTogether_None:difficultyOptimal_Calibrated  753.16314
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated  751.06505
## communication_conditionTogether_Video:difficultyOptimal_Calibrated 754.88128
## communication_conditionTogether_None:difficultyHard                751.88370
## communication_conditionTogether_Chat:difficultyHard                753.05625
## communication_conditionTogether_Video:difficultyHard               753.32707
##                                                                    t value
## (Intercept)                                                         30.991
## communication_conditionTogether_None                                 0.497
## communication_conditionTogether_Chat                                 3.759
## communication_conditionTogether_Video                                4.673
## difficultyOptimal_Selected                                           1.761
## difficultyOptimal_Calibrated                                         4.789
## difficultyHard                                                      -4.863
## communication_conditionTogether_None:difficultyOptimal_Selected     -1.427
## communication_conditionTogether_Chat:difficultyOptimal_Selected     -1.146
## communication_conditionTogether_Video:difficultyOptimal_Selected    -0.177
## communication_conditionTogether_None:difficultyOptimal_Calibrated   -3.691
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated   -2.919
## communication_conditionTogether_Video:difficultyOptimal_Calibrated  -3.317
## communication_conditionTogether_None:difficultyHard                  0.356
## communication_conditionTogether_Chat:difficultyHard                 -0.306
## communication_conditionTogether_Video:difficultyHard                 0.662
##                                                                    Pr(>|t|)    
## (Intercept)                                                         < 2e-16 ***
## communication_conditionTogether_None                               0.619064    
## communication_conditionTogether_Chat                               0.000191 ***
## communication_conditionTogether_Video                              3.79e-06 ***
## difficultyOptimal_Selected                                         0.078686 .  
## difficultyOptimal_Calibrated                                       2.02e-06 ***
## difficultyHard                                                     1.41e-06 ***
## communication_conditionTogether_None:difficultyOptimal_Selected    0.153889    
## communication_conditionTogether_Chat:difficultyOptimal_Selected    0.252122    
## communication_conditionTogether_Video:difficultyOptimal_Selected   0.859598    
## communication_conditionTogether_None:difficultyOptimal_Calibrated  0.000239 ***
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated  0.003615 ** 
## communication_conditionTogether_Video:difficultyOptimal_Calibrated 0.000953 ***
## communication_conditionTogether_None:difficultyHard                0.721847    
## communication_conditionTogether_Chat:difficultyHard                0.759647    
## communication_conditionTogether_Video:difficultyHard               0.508326    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(value, correlation=TRUE)  or
##     vcov(value)        if you need it
# Model 4: full model — the interaction from model 3 plus fp_total
# (flow proneness) as an additional person-level covariate.
print("\n--- MODELL 4: Vollständiges Modell ---")
## [1] "\n--- MODELL 4: Vollständiges Modell ---"
model_comprehensive_4 <- lmer(
  flow_score ~ communication_condition * difficulty + fp_total +
    (1 | session_id) + (1 | participant_id),
  data = comprehensive_data
)
summary(model_comprehensive_4)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ communication_condition * difficulty + fp_total +  
##     (1 | session_id) + (1 | participant_id)
##    Data: comprehensive_data
## 
## REML criterion at convergence: 2502.9
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.87242 -0.55663  0.02927  0.59089  2.67833 
## 
## Random effects:
##  Groups         Name        Variance Std.Dev.
##  participant_id (Intercept) 0.27805  0.5273  
##  session_id     (Intercept) 0.02647  0.1627  
##  Residual                   0.46823  0.6843  
## Number of obs: 1030, groups:  participant_id, 267; session_id, 117
## 
## Fixed effects:
##                                                                     Estimate
## (Intercept)                                                          2.19527
## communication_conditionTogether_None                                 0.06707
## communication_conditionTogether_Chat                                 0.90832
## communication_conditionTogether_Video                                1.11214
## difficultyOptimal_Selected                                           0.28658
## difficultyOptimal_Calibrated                                         0.78014
## difficultyHard                                                      -0.78067
## fp_total                                                             0.64131
## communication_conditionTogether_None:difficultyOptimal_Selected     -0.26946
## communication_conditionTogether_Chat:difficultyOptimal_Selected     -0.23658
## communication_conditionTogether_Video:difficultyOptimal_Selected    -0.03564
## communication_conditionTogether_None:difficultyOptimal_Calibrated   -0.69542
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated   -0.60051
## communication_conditionTogether_Video:difficultyOptimal_Calibrated  -0.68232
## communication_conditionTogether_None:difficultyHard                  0.06446
## communication_conditionTogether_Chat:difficultyHard                 -0.07417
## communication_conditionTogether_Video:difficultyHard                 0.13597
##                                                                    Std. Error
## (Intercept)                                                           0.46738
## communication_conditionTogether_None                                  0.17144
## communication_conditionTogether_Chat                                  0.19103
## communication_conditionTogether_Video                                 0.19343
## difficultyOptimal_Selected                                            0.16104
## difficultyOptimal_Calibrated                                          0.16234
## difficultyHard                                                        0.16104
## fp_total                                                              0.11414
## communication_conditionTogether_None:difficultyOptimal_Selected       0.18671
## communication_conditionTogether_Chat:difficultyOptimal_Selected       0.20382
## communication_conditionTogether_Video:difficultyOptimal_Selected      0.20575
## communication_conditionTogether_None:difficultyOptimal_Calibrated     0.18784
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated     0.20485
## communication_conditionTogether_Video:difficultyOptimal_Calibrated    0.20699
## communication_conditionTogether_None:difficultyHard                   0.18686
## communication_conditionTogether_Chat:difficultyHard                   0.20689
## communication_conditionTogether_Video:difficultyHard                  0.20826
##                                                                           df
## (Intercept)                                                        284.90130
## communication_conditionTogether_None                               612.94770
## communication_conditionTogether_Chat                               511.00699
## communication_conditionTogether_Video                              520.18599
## difficultyOptimal_Selected                                         753.01177
## difficultyOptimal_Calibrated                                       755.42751
## difficultyHard                                                     753.01177
## fp_total                                                           257.56658
## communication_conditionTogether_None:difficultyOptimal_Selected    752.57779
## communication_conditionTogether_Chat:difficultyOptimal_Selected    750.57747
## communication_conditionTogether_Video:difficultyOptimal_Selected   752.46358
## communication_conditionTogether_None:difficultyOptimal_Calibrated  754.38973
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated  752.11892
## communication_conditionTogether_Video:difficultyOptimal_Calibrated 756.11231
## communication_conditionTogether_None:difficultyHard                752.96742
## communication_conditionTogether_Chat:difficultyHard                754.16875
## communication_conditionTogether_Video:difficultyHard               754.65604
##                                                                    t value
## (Intercept)                                                          4.697
## communication_conditionTogether_None                                 0.391
## communication_conditionTogether_Chat                                 4.755
## communication_conditionTogether_Video                                5.750
## difficultyOptimal_Selected                                           1.780
## difficultyOptimal_Calibrated                                         4.806
## difficultyHard                                                      -4.848
## fp_total                                                             5.618
## communication_conditionTogether_None:difficultyOptimal_Selected     -1.443
## communication_conditionTogether_Chat:difficultyOptimal_Selected     -1.161
## communication_conditionTogether_Video:difficultyOptimal_Selected    -0.173
## communication_conditionTogether_None:difficultyOptimal_Calibrated   -3.702
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated   -2.932
## communication_conditionTogether_Video:difficultyOptimal_Calibrated  -3.296
## communication_conditionTogether_None:difficultyHard                  0.345
## communication_conditionTogether_Chat:difficultyHard                 -0.358
## communication_conditionTogether_Video:difficultyHard                 0.653
##                                                                    Pr(>|t|)    
## (Intercept)                                                        4.11e-06 ***
## communication_conditionTogether_None                               0.695774    
## communication_conditionTogether_Chat                               2.59e-06 ***
## communication_conditionTogether_Video                              1.53e-08 ***
## difficultyOptimal_Selected                                         0.075542 .  
## difficultyOptimal_Calibrated                                       1.86e-06 ***
## difficultyHard                                                     1.52e-06 ***
## fp_total                                                           4.99e-08 ***
## communication_conditionTogether_None:difficultyOptimal_Selected    0.149392    
## communication_conditionTogether_Chat:difficultyOptimal_Selected    0.246101    
## communication_conditionTogether_Video:difficultyOptimal_Selected   0.862542    
## communication_conditionTogether_None:difficultyOptimal_Calibrated  0.000229 ***
## communication_conditionTogether_Chat:difficultyOptimal_Calibrated  0.003475 ** 
## communication_conditionTogether_Video:difficultyOptimal_Calibrated 0.001025 ** 
## communication_conditionTogether_None:difficultyHard                0.730200    
## communication_conditionTogether_Chat:difficultyHard                0.720071    
## communication_conditionTogether_Video:difficultyHard               0.514025    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation matrix not shown by default, as p = 17 > 12.
## Use print(value, correlation=TRUE)  or
##     vcov(value)        if you need it
# Model comparison via information criteria (models 1-4 fitted above).
# NOTE(review): all four models were fit by REML (see their summaries) but
# differ in fixed effects; AIC/BIC comparisons across different fixed-effect
# structures are strictly valid only for ML fits — consider
# anova(model_comprehensive_1, ..., model_comprehensive_4), which refits
# with ML automatically. Confirm before relying on these tables.
print("\n--- MODELLVERGLEICH ---")
## [1] "\n--- MODELLVERGLEICH ---"
comprehensive_aic <- AIC(model_comprehensive_1, model_comprehensive_2, model_comprehensive_3, model_comprehensive_4)
comprehensive_bic <- BIC(model_comprehensive_1, model_comprehensive_2, model_comprehensive_3, model_comprehensive_4)

print("AIC Vergleich:")
## [1] "AIC Vergleich:"
print(comprehensive_aic)
##                       df      AIC
## model_comprehensive_1  7 2787.647
## model_comprehensive_2 10 2558.510
## model_comprehensive_3 19 2568.093
## model_comprehensive_4 20 2542.909
# By AIC, model 4 is the best of the four (2542.9); by BIC, model 2 wins
# (2607.9), reflecting BIC's heavier penalty on the interaction terms.
print("BIC Vergleich:")
## [1] "BIC Vergleich:"
print(comprehensive_bic)
##                       df      BIC
## model_comprehensive_1  7 2822.208
## model_comprehensive_2 10 2607.883
## model_comprehensive_3 19 2661.901
## model_comprehensive_4 20 2641.655
# ================================================================================
# PART 4: EMMEANS PAIRWISE COMPARISONS
# ================================================================================

print("\n=== EMMEANS PAARVERGLEICHE - ALLE CONDITIONS ===")
## [1] "\n=== EMMEANS PAARVERGLEICHE - ALLE CONDITIONS ==="
# Use the best model (model 4, lowest AIC in the comparison above).
best_comprehensive_model <- model_comprehensive_4
# All pairwise comparisons between communication conditions,
# Tukey-adjusted and averaged over difficulty levels — hence the
# emmeans NOTE about interactions echoed below.
print("\n--- Alle Communication Condition Paarvergleiche ---")
## [1] "\n--- Alle Communication Condition Paarvergleiche ---"
# NOTE(review): round(.x, 4) maps any p-value below 5e-5 to 0, yet the echoed
# table below still shows unrounded values (e.g. 2.13e-5) — confirm which
# version of this chunk produced the displayed output.
all_comm_comparisons <- emmeans(best_comprehensive_model, specs = pairwise ~ communication_condition, adjust = "tukey")$contrasts %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
## NOTE: Results may be misleading due to involvement in interactions
print("Alle Communication Condition Vergleiche:")
## [1] "Alle Communication Condition Vergleiche:"
print(all_comm_comparisons)
## # A tibble: 6 × 6
##   contrast                       estimate    SE    df t.ratio  p.value
##   <chr>                             <dbl> <dbl> <dbl>   <dbl>    <dbl>
## 1 Alone - Together_None             0.158 0.125 211.     1.27 5.86e- 1
## 2 Alone - Together_Chat            -0.680 0.142 179.    -4.78 2.13e- 5
## 3 Alone - Together_Video           -0.967 0.144 182.    -6.73 1.28e- 9
## 4 Together_None - Together_Chat    -0.839 0.117  89.8   -7.16 1.71e- 9
## 5 Together_None - Together_Video   -1.12  0.119  93.7   -9.46 4.87e-10
## 6 Together_Chat - Together_Video   -0.286 0.127  76.8   -2.26 1.18e- 1
# Key comparisons of interest. The four patterns jointly cover every pairwise
# contrast in the table, so all six rows are retained (see echoed output).
print("\n--- Spezifische Vergleiche von Interesse ---")
## [1] "\n--- Spezifische Vergleiche von Interesse ---"
key_patterns <- paste(
  "Alone.*Together",
  "Together_None.*Together_Chat",
  "Together_None.*Together_Video",
  "Together_Chat.*Together_Video",
  sep = "|"
)
key_comparisons <- all_comm_comparisons %>%
  filter(grepl(key_patterns, contrast))

# Only echo the table when at least one contrast matched.
if (nrow(key_comparisons) > 0) {
  print("Wichtige Vergleiche:")
  print(key_comparisons %>% dplyr::select(contrast, estimate, p.value))
}
## [1] "Wichtige Vergleiche:"
## # A tibble: 6 × 3
##   contrast                       estimate  p.value
##   <chr>                             <dbl>    <dbl>
## 1 Alone - Together_None             0.158 5.86e- 1
## 2 Alone - Together_Chat            -0.680 2.13e- 5
## 3 Alone - Together_Video           -0.967 1.28e- 9
## 4 Together_None - Together_Chat    -0.839 1.71e- 9
## 5 Together_None - Together_Video   -1.12  4.87e-10
## 6 Together_Chat - Together_Video   -0.286 1.18e- 1
# Estimated marginal means per communication condition, averaged over
# difficulty (hence the emmeans interaction NOTE echoed below).
print("\n--- Marginal Means: Communication Conditions ---")
## [1] "\n--- Marginal Means: Communication Conditions ---"
comprehensive_marginal_means <- emmeans(best_comprehensive_model, ~ communication_condition) %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
## NOTE: Results may be misleading due to involvement in interactions
print(comprehensive_marginal_means)
## # A tibble: 4 × 6
##   communication_condition emmean     SE    df lower.CL upper.CL
##   <fct>                    <dbl>  <dbl> <dbl>    <dbl>    <dbl>
## 1 Alone                     4.68 0.107  258.      4.47     4.89
## 2 Together_None             4.52 0.0685  90.2     4.39     4.66
## 3 Together_Chat             5.36 0.0914  80.7     5.18     5.54
## 4 Together_Video            5.65 0.0929  84.6     5.46     5.83
# Cell means for every communication condition x difficulty combination
# (16 cells); these feed the interaction plot below.
print("\n--- Marginal Means: Communication × Difficulty ---")
## [1] "\n--- Marginal Means: Communication × Difficulty ---"
interaction_marginal_means <- emmeans(best_comprehensive_model, ~ communication_condition * difficulty) %>%
  as_tibble() %>%
  mutate(across(where(is.numeric), ~ round(.x, 4)))
print(interaction_marginal_means)
## # A tibble: 16 × 7
##    communication_condition difficulty      emmean     SE    df lower.CL upper.CL
##    <fct>                   <fct>            <dbl>  <dbl> <dbl>    <dbl>    <dbl>
##  1 Alone                   Easy              4.61 0.148   729.     4.32     4.90
##  2 Together_None           Easy              4.68 0.0898  251.     4.50     4.85
##  3 Together_Chat           Easy              5.52 0.119   220.     5.28     5.75
##  4 Together_Video          Easy              5.72 0.122   239.     5.48     5.96
##  5 Alone                   Optimal_Select…   4.90 0.143   687.     4.62     5.18
##  6 Together_None           Optimal_Select…   4.69 0.0896  249.     4.52     4.87
##  7 Together_Chat           Optimal_Select…   5.57 0.119   220.     5.33     5.80
##  8 Together_Video          Optimal_Select…   5.97 0.120   227.     5.74     6.21
##  9 Alone                   Optimal_Calibr…   5.39 0.145   701.     5.11     5.67
## 10 Together_None           Optimal_Calibr…   4.76 0.0895  249.     4.59     4.94
## 11 Together_Chat           Optimal_Calibr…   5.70 0.119   220.     5.46     5.93
## 12 Together_Video          Optimal_Calibr…   5.82 0.120   227.     5.58     6.06
## 13 Alone                   Hard              3.83 0.143   687.     3.55     4.11
## 14 Together_None           Hard              3.96 0.0897  251.     3.78     4.14
## 15 Together_Chat           Hard              4.66 0.124   252.     4.42     4.91
## 16 Together_Video          Hard              5.08 0.125   258.     4.83     5.32
# ================================================================================
# PART 5: EFFECT SIZES AND PRACTICAL SIGNIFICANCE
# ================================================================================

print("\n=== EFFEKTGRÖSSEN UND PRAKTISCHE BEDEUTSAMKEIT ===")
## [1] "\n=== EFFEKTGRÖSSEN UND PRAKTISCHE BEDEUTSAMKEIT ==="
# Approximate Cohen's d by scaling each contrast estimate with the raw
# standard deviation of flow_score (an approximation, not a model-based
# pooled SD).
pooled_sd_comprehensive <- sd(comprehensive_data$flow_score, na.rm = TRUE)

effect_sizes_comprehensive <- all_comm_comparisons %>%
  mutate(cohens_d = abs(estimate) / pooled_sd_comprehensive) %>%
  mutate(
    # Conventional magnitude thresholds, checked from the largest bin down
    # (equivalent to ascending < 0.2 / < 0.5 / < 0.8 cutoffs).
    effect_magnitude = case_when(
      cohens_d >= 0.8 ~ "Groß",
      cohens_d >= 0.5 ~ "Mittel",
      cohens_d >= 0.2 ~ "Klein",
      TRUE ~ "Vernachlässigbar"
    ),
    # Joint verdict from statistical significance and effect magnitude;
    # order matters — the first matching clause wins.
    practical_significance = case_when(
      p.value < 0.001 & cohens_d >= 0.5 ~ "Hoch signifikant + praktisch relevant",
      p.value < 0.05 & cohens_d >= 0.5 ~ "Signifikant + praktisch relevant",
      p.value < 0.05 & cohens_d >= 0.2 ~ "Signifikant + kleiner Effekt",
      p.value >= 0.05 ~ "Nicht signifikant",
      TRUE ~ "Signifikant aber vernachlässigbare Effektgröße"
    )
  ) %>%
  dplyr::select(contrast, estimate, p.value, cohens_d, effect_magnitude,
                practical_significance)

# Echo the effect-size table assembled above.
print("Effect Sizes für alle Communication Condition Vergleiche:")
## [1] "Effect Sizes für alle Communication Condition Vergleiche:"
print(effect_sizes_comprehensive)
## # A tibble: 6 × 6
##   contrast    estimate  p.value cohens_d effect_magnitude practical_significance
##   <chr>          <dbl>    <dbl>    <dbl> <chr>            <chr>                 
## 1 Alone - To…    0.158 5.86e- 1    0.151 Vernachlässigbar Nicht signifikant     
## 2 Alone - To…   -0.680 2.13e- 5    0.650 Mittel           Hoch signifikant + pr…
## 3 Alone - To…   -0.967 1.28e- 9    0.923 Groß             Hoch signifikant + pr…
## 4 Together_N…   -0.839 1.71e- 9    0.800 Groß             Hoch signifikant + pr…
## 5 Together_N…   -1.12  4.87e-10    1.07  Groß             Hoch signifikant + pr…
## 6 Together_C…   -0.286 1.18e- 1    0.273 Klein            Nicht signifikant
# ================================================================================
# PART 6: SUMMARY AND INTERPRETATION
# ================================================================================

print("\n=== ZUSAMMENFASSUNG: VIER-WEG VERGLEICH ===")
## [1] "\n=== ZUSAMMENFASSUNG: VIER-WEG VERGLEICH ==="
# Rank the four conditions by estimated marginal mean flow score
# (descending) and attach German display labels.
flow_ranking <- comprehensive_marginal_means %>%
  arrange(desc(emmean)) %>%
  dplyr::select(communication_condition, emmean, SE) %>%
  dplyr::mutate(
    rank = row_number(),
    condition_german = case_when(
      communication_condition == "Alone" ~ "Allein",
      communication_condition == "Together_None" ~ "Zusammen (keine Kommunikation)",
      communication_condition == "Together_Chat" ~ "Zusammen (Chat)",
      communication_condition == "Together_Video" ~ "Zusammen (Video)"
    )
  )

# Echo the ranking (highest to lowest flow score).
print("Flow Score Ranking (höchste zu niedrigste):")
## [1] "Flow Score Ranking (höchste zu niedrigste):"
print(flow_ranking %>% dplyr::select(rank, condition_german, emmean, SE))
## # A tibble: 4 × 4
##    rank condition_german               emmean     SE
##   <int> <chr>                           <dbl>  <dbl>
## 1     1 Zusammen (Video)                 5.65 0.0929
## 2     2 Zusammen (Chat)                  5.36 0.0914
## 3     3 Allein                           4.68 0.107 
## 4     4 Zusammen (keine Kommunikation)   4.52 0.0685
# Summarise the significant pairwise differences, sorted by p-value.
# Note: p.value here is the (rounded) column from effect_sizes_comprehensive.
significant_comparisons <- effect_sizes_comprehensive %>%
  filter(p.value < 0.05) %>%
  arrange(p.value)

print("\nSignifikante Unterschiede (nach p-Wert sortiert):")
## [1] "\nSignifikante Unterschiede (nach p-Wert sortiert):"
if (nrow(significant_comparisons) > 0) {
  print(significant_comparisons %>% dplyr::select(contrast, estimate, p.value, effect_magnitude))
} else {
  print("Keine signifikanten Unterschiede gefunden.")
}
## # A tibble: 4 × 4
##   contrast                       estimate  p.value effect_magnitude
##   <chr>                             <dbl>    <dbl> <chr>           
## 1 Together_None - Together_Video   -1.12  4.87e-10 Groß            
## 2 Alone - Together_Video           -0.967 1.28e- 9 Groß            
## 3 Together_None - Together_Chat    -0.839 1.71e- 9 Groß            
## 4 Alone - Together_Chat            -0.680 2.13e- 5 Mittel
# Interaction plots for the full historical comparison.
# Four-way comparison: alone vs. together (no communication) vs. chat vs. video.
# ================================================================================

# ggplot2 is already attached via tidyverse and emmeans was used above;
# re-attaching is a harmless no-op here.
library(ggplot2)
library(emmeans)
library(RColorBrewer)

print("=== INTERACTION PLOTS FÜR VIER-WEG-VERGLEICH ===")
## [1] "=== INTERACTION PLOTS FÜR VIER-WEG-VERGLEICH ==="
# ================================================================================
# PART 1: MAIN INTERACTION PLOT - ALL FOUR CONDITIONS
# ================================================================================

print("--- HAUPTINTERAKTIONSPLOT: Alle Vier Communication Conditions ---")
## [1] "--- HAUPTINTERAKTIONSPLOT: Alle Vier Communication Conditions ---"
# Build ggplot-ready data for the four-way comparison: readable German /
# multi-line labels plus explicit factor orderings for both axes.
comm_label_map <- c(
  "Alone"          = "Allein",
  "Together_None"  = "Zusammen\n(keine Komm.)",
  "Together_Chat"  = "Zusammen\n(Chat)",
  "Together_Video" = "Zusammen\n(Video)"
)
diff_label_map <- c(
  "Easy"               = "Easy",
  "Optimal_Selected"   = "Optimal\n(Selected)",
  "Optimal_Calibrated" = "Optimal\n(Calibrated)",
  "Hard"               = "Hard"
)
comprehensive_plot_data <- interaction_marginal_means %>%
  mutate(
    # Named-vector lookups give the same character labels as the original
    # case_when() mapping (unmatched levels would become NA in both).
    communication_label = unname(comm_label_map[as.character(communication_condition)]),
    difficulty_label = unname(diff_label_map[as.character(difficulty)]),
    # Fix the display order of difficulty levels on the x-axis.
    difficulty_ordered = factor(
      difficulty_label,
      levels = c("Easy", "Optimal\n(Selected)",
                 "Optimal\n(Calibrated)", "Hard")
    ),
    # Fix the legend order of the communication conditions.
    communication_ordered = factor(
      communication_label,
      levels = c("Allein", "Zusammen\n(keine Komm.)",
                 "Zusammen\n(Chat)", "Zusammen\n(Video)")
    )
  )

# One color per condition: red, orange, blue, green (hex codes as before).
color_palette <- setNames(
  c("#E31A1C", "#FF7F00", "#1F78B4", "#33A02C"),
  c("Allein", "Zusammen\n(keine Komm.)",
    "Zusammen\n(Chat)", "Zusammen\n(Video)")
)

# Main interaction plot: estimated marginal mean flow score for each of the
# four communication conditions across the four difficulty levels, with
# +/- 1 SE error bars.
p_comprehensive <- ggplot(comprehensive_plot_data, 
                         aes(x = difficulty_ordered, y = emmean, 
                            color = communication_ordered, group = communication_ordered)) +
  # FIX: `size` is deprecated for line-based geoms since ggplot2 3.4
  # (this document is knitted with ggplot2 3.5.2) — use `linewidth` in
  # geom_line()/geom_errorbar(). `size` remains correct for geom_point().
  geom_line(linewidth = 1.3, alpha = 0.9) +
  geom_point(size = 3.5, alpha = 0.9) +
  geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE), 
                width = 0.15, alpha = 0.7, linewidth = 0.8) +
  scale_color_manual(values = color_palette) +
  labs(
    title = "Flow Score Across Communication Conditions and Difficulty Levels",
    subtitle = "Historical (Allein, Zusammen ohne Kommunikation) vs. Current (Chat, Video)\nEstimated Marginal Means with Standard Errors",
    x = "Difficulty Level",
    y = "Estimated Flow Score",
    color = "Communication\nCondition"
  ) +
  theme_minimal() +
  theme(
    plot.title = element_text(size = 16, face = "bold", hjust = 0.5),
    plot.subtitle = element_text(size = 12, hjust = 0.5),
    axis.title = element_text(size = 13),
    axis.text = element_text(size = 11),
    axis.text.x = element_text(size = 10),
    legend.title = element_text(size = 12, face = "bold"),
    legend.text = element_text(size = 10),
    panel.grid.minor = element_blank(),
    panel.grid.major.x = element_blank(),
    legend.position = "right"
  ) +
  # Stretch the y-axis slightly beyond the error-bar range for visibility.
  scale_y_continuous(
    limits = c(
      min(comprehensive_plot_data$emmean - comprehensive_plot_data$SE) * 0.95,
      max(comprehensive_plot_data$emmean + comprehensive_plot_data$SE) * 1.05
    ),
    breaks = scales::pretty_breaks(n = 6)
  )

print(p_comprehensive)

ANOVA for treatment effects

# ================================================================================
   # UMFASSENDE ANOVA-ANALYSE
# ================================================================================

library(car)        # Für Levene-Test und Typ II/III ANOVA
## Lade nötiges Paket: carData
## Warning: Paket 'carData' wurde unter R Version 4.2.3 erstellt
## 
## Attache Paket: 'car'
## Das folgende Objekt ist maskiert 'package:psych':
## 
##     logit
## Das folgende Objekt ist maskiert 'package:dplyr':
## 
##     recode
## Das folgende Objekt ist maskiert 'package:purrr':
## 
##     some
library(effectsize) # Für Effektgrößen (eta-squared, omega-squared)
## Registered S3 methods overwritten by 'parameters':
##   method                           from      
##   display.parameters_distribution  datawizard
##   plot.parameters_distribution     datawizard
##   print_md.parameters_distribution datawizard
## 
## Attache Paket: 'effectsize'
## Das folgende Objekt ist maskiert 'package:psych':
## 
##     phi
library(multcomp)   # Für erweiterte Post-hoc Tests
## Lade nötiges Paket: mvtnorm
## 
## Attache Paket: 'mvtnorm'
## Das folgende Objekt ist maskiert 'package:effectsize':
## 
##     standardize
## Lade nötiges Paket: survival
## Lade nötiges Paket: TH.data
## 
## Attache Paket: 'TH.data'
## Das folgende Objekt ist maskiert 'package:MASS':
## 
##     geyser
library(nortest)    # Für erweiterte Normalitätstests

print("\n=== UMFASSENDE ANOVA-ANALYSE ===")
## [1] "\n=== UMFASSENDE ANOVA-ANALYSE ==="
# ================================================================================
# STEP 1: ASSUMPTION CHECKS
# ================================================================================

print("\n--- ANNAHMEN-ÜBERPRÜFUNG ---")
## [1] "\n--- ANNAHMEN-ÜBERPRÜFUNG ---"
# 1. Normality tests
print("\n1. NORMALITÄTSTEST")
## [1] "\n1. NORMALITÄTSTEST"
# Shapiro-Wilk test per group (only defined for 3 <= n <= 5000 observations).
# Within summarise(), `n` is a scalar per group, so use `if`/`else` with the
# short-circuiting `&&` rather than the vectorized `ifelse()`/`&` of the
# original; unname() strips the "W" name from the statistic so the column
# holds plain doubles (ifelse() previously dropped it implicitly).
normality_by_group <- comprehensive_data %>%
  group_by(communication_condition) %>%
  dplyr::summarise(
    n = n(),
    shapiro_w = if (n >= 3 && n <= 5000) unname(shapiro.test(flow_score)$statistic) else NA_real_,
    shapiro_p = if (n >= 3 && n <= 5000) shapiro.test(flow_score)$p.value else NA_real_,
    .groups = "drop"
  )

print("Normalitätstest pro Communication Condition:")
## [1] "Normalitätstest pro Communication Condition:"
print(normality_by_group)
## # A tibble: 4 × 4
##   communication_condition     n shapiro_w  shapiro_p
##   <fct>                   <int>     <dbl>      <dbl>
## 1 Alone                     148     0.982 0.0454    
## 2 Together_None             422     0.992 0.0282    
## 3 Together_Chat             233     0.975 0.000363  
## 4 Together_Video            227     0.955 0.00000161
# Anderson-Darling test on the pooled sample (suitable for larger n)
print("\nAnderson-Darling Normalitätstest (gesamt):")
## [1] "\nAnderson-Darling Normalitätstest (gesamt):"
ad_test <- nortest::ad.test(comprehensive_data$flow_score)
print(paste("A =", round(ad_test$statistic, 4), ", p =", round(ad_test$p.value, 4)))
## [1] "A = 2.2108 , p = 0"
# Prepare QQ-plot data for visual inspection. arrange() ignores grouping by
# default, but a global sort on flow_score leaves rows ascending within each
# group too, so the per-group theoretical quantiles pair up correctly.
qq_data <- comprehensive_data %>%
  group_by(communication_condition) %>%
  arrange(flow_score) %>%
  dplyr::mutate(
    theoretical_quantile = qnorm(ppoints(n())),
    sample_quantile = flow_score
  ) %>%
  ungroup()

print("QQ-Plot Daten bereit für visuelle Inspektion")
## [1] "QQ-Plot Daten bereit für visuelle Inspektion"
# 2. Homogeneity of variances
print("\n2. HOMOGENITÄT DER VARIANZEN")
## [1] "\n2. HOMOGENITÄT DER VARIANZEN"
# Levene test (car::leveneTest, median-centered by default — robust to non-normality)
levene_test <- leveneTest(flow_score ~ communication_condition, data = comprehensive_data)
print("Levene-Test für Varianzhomogenität:")
## [1] "Levene-Test für Varianzhomogenität:"
print(levene_test)
## Levene's Test for Homogeneity of Variance (center = median)
##         Df F value  Pr(>F)  
## group    3  3.6903 0.01163 *
##       1026                  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Bartlett test (more sensitive to departures from normality)
bartlett_test <- bartlett.test(flow_score ~ communication_condition, data = comprehensive_data)
print("Bartlett-Test für Varianzhomogenität:")
## [1] "Bartlett-Test für Varianzhomogenität:"
print(paste("Chi-squared =", round(bartlett_test$statistic, 4), 
           ", df =", bartlett_test$parameter, 
           ", p =", round(bartlett_test$p.value, 4)))
## [1] "Chi-squared = 12.5756 , df = 3 , p = 0.0057"
# Descriptive statistics per group
variance_stats <- comprehensive_data %>%
  group_by(communication_condition) %>%
  dplyr::summarise(
    n = n(),
    mean = mean(flow_score, na.rm = TRUE),
    sd = sd(flow_score, na.rm = TRUE),
    variance = var(flow_score, na.rm = TRUE),
    min_val = min(flow_score, na.rm = TRUE),
    max_val = max(flow_score, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  # BUG FIX: matches("n") matches the substring "n" ANYWHERE in a column name,
  # so the original !matches("n") silently excluded `mean`, `variance` and
  # `min_val` from rounding (not just the count column `n`). Anchor the regex
  # so only the column literally named "n" is exempt.
  mutate(across(where(is.numeric) & !matches("^n$"), ~ round(.x, 3)))

print("\nDeskriptive Statistiken pro Communication Condition:")
## [1] "\nDeskriptive Statistiken pro Communication Condition:"
print(variance_stats)
## # A tibble: 4 × 7
##   communication_condition     n  mean    sd variance min_val max_val
##   <fct>                   <int> <dbl> <dbl>    <dbl>   <dbl>   <dbl>
## 1 Alone                     148  4.76 1.10     1.22     1.78    7   
## 2 Together_None             422  4.63 0.889    0.791    2.22    6.89
## 3 Together_Chat             233  5.28 1.02     1.04     3       7   
## 4 Together_Video            227  5.55 0.992    0.984    3.11    7
# Check the variance ratio (rule of thumb: largest/smallest variance < 4)
variance_ratio <- max(variance_stats$variance) / min(variance_stats$variance)
print(paste("Varianzenverhältnis (max/min):", round(variance_ratio, 3)))
## [1] "Varianzenverhältnis (max/min): 1.537"
print(paste("Homogenitätsannahme erfüllt (< 4):", variance_ratio < 4))
## [1] "Homogenitätsannahme erfüllt (< 4): TRUE"
# ================================================================================
# STEP 2: ONE-WAY ANOVA - COMMUNICATION CONDITION
# ================================================================================

print("\n--- EINFAKTORIELLE ANOVA: COMMUNICATION CONDITION ---")
## [1] "\n--- EINFAKTORIELLE ANOVA: COMMUNICATION CONDITION ---"
# Standard (Type I) ANOVA of flow score on the four communication conditions
anova_comm <- aov(flow_score ~ communication_condition, data = comprehensive_data)
anova_comm_summary <- summary(anova_comm)
print("Einfaktorielle ANOVA - Communication Condition:")
## [1] "Einfaktorielle ANOVA - Communication Condition:"
print(anova_comm_summary)
##                           Df Sum Sq Mean Sq F value Pr(>F)    
## communication_condition    3  154.8   51.59    54.3 <2e-16 ***
## Residuals               1026  974.8    0.95                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Type II ANOVA (car::Anova; more robust for unbalanced designs)
anova_comm_type2 <- Anova(anova_comm, type = "II")
print("\nTyp II ANOVA - Communication Condition:")
## [1] "\nTyp II ANOVA - Communication Condition:"
print(anova_comm_type2)
## Anova Table (Type II tests)
## 
## Response: flow_score
##                         Sum Sq   Df F value    Pr(>F)    
## communication_condition 154.77    3  54.298 < 2.2e-16 ***
## Residuals               974.79 1026                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Effect sizes (effectsize package). Note the messages below: for a one-way
# between-subjects design, partial and unpartialled eta/omega squared coincide,
# and the returned column is named "Eta2"/"Omega2" (not "Eta2_partial").
eta_squared_comm <- eta_squared(anova_comm, partial = TRUE)
## For one-way between subjects designs, partial eta squared is equivalent
##   to eta squared. Returning eta squared.
omega_squared_comm <- omega_squared(anova_comm, partial = TRUE)
## For one-way between subjects designs, partial omega squared is
##   equivalent to omega squared. Returning omega squared.
print("\nEffektgrößen - Communication Condition:")
## [1] "\nEffektgrößen - Communication Condition:"
print("Eta-squared (partiell):")
## [1] "Eta-squared (partiell):"
print(eta_squared_comm)
## # Effect Size for ANOVA
## 
## Parameter               | Eta2 |       95% CI
## ---------------------------------------------
## communication_condition | 0.14 | [0.10, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
print("Omega-squared (partiell):")
## [1] "Omega-squared (partiell):"
print(omega_squared_comm)
## # Effect Size for ANOVA
## 
## Parameter               | Omega2 |       95% CI
## -----------------------------------------------
## communication_condition |   0.13 | [0.10, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
# ================================================================================
# STEP 3: TWO-WAY ANOVA - COMMUNICATION × DIFFICULTY
# ================================================================================

print("\n--- ZWEIFAKTORIELLE ANOVA: COMMUNICATION × DIFFICULTY ---")
## [1] "\n--- ZWEIFAKTORIELLE ANOVA: COMMUNICATION × DIFFICULTY ---"
# Check cell counts of the factorial design
cell_counts <- table(comprehensive_data$communication_condition, comprehensive_data$difficulty)
print("Zellenbesetzung (Communication × Difficulty):")
## [1] "Zellenbesetzung (Communication × Difficulty):"
print(cell_counts)
##                 
##                  Easy Optimal_Selected Optimal_Calibrated Hard
##   Alone            35               38                 37   38
##   Together_None   105              106                106  105
##   Together_Chat    60               60                 60   53
##   Together_Video   56               59                 59   53
# Identify empty cells (would make interaction terms inestimable)
empty_cells <- which(cell_counts == 0, arr.ind = TRUE)
if(nrow(empty_cells) > 0) {
  print("WARNUNG: Leere Zellen gefunden!")
  print(empty_cells)
} else {
  print("Keine leeren Zellen - Design ist balanciert genug für ANOVA")
}
## [1] "Keine leeren Zellen - Design ist balanciert genug für ANOVA"
# Two-way ANOVA with interaction
anova_full <- aov(flow_score ~ communication_condition * difficulty, data = comprehensive_data)
anova_full_summary <- summary(anova_full)
print("\nZweifaktorielle ANOVA (Communication × Difficulty):")
## [1] "\nZweifaktorielle ANOVA (Communication × Difficulty):"
print(anova_full_summary)
##                                      Df Sum Sq Mean Sq F value Pr(>F)    
## communication_condition               3  154.8   51.59  63.500 <2e-16 ***
## difficulty                            3  139.0   46.34  57.036 <2e-16 ***
## communication_condition:difficulty    9   12.0    1.33   1.639 0.0997 .  
## Residuals                          1014  823.8    0.81                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Type III ANOVA (for unbalanced designs with interactions).
# NOTE(review): Type III tests are only meaningful with sum-to-zero contrasts;
# aov() uses treatment contrasts by default — confirm options(contrasts = ...)
# was set earlier in the document.
anova_full_type3 <- Anova(anova_full, type = "III")
print("\nTyp III ANOVA - Communication × Difficulty:")
## [1] "\nTyp III ANOVA - Communication × Difficulty:"
print(anova_full_type3)
## Anova Table (Type III tests)
## 
## Response: flow_score
##                                    Sum Sq   Df  F value    Pr(>F)    
## (Intercept)                        774.72    1 953.5911 < 2.2e-16 ***
## communication_condition             38.09    3  15.6295 6.076e-10 ***
## difficulty                          47.77    3  19.6000 2.345e-12 ***
## communication_condition:difficulty  11.98    9   1.6388    0.0997 .  
## Residuals                          823.79 1014                       
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Effect sizes for all factors of the full model
eta_squared_full <- eta_squared(anova_full, partial = TRUE)
omega_squared_full <- omega_squared(anova_full, partial = TRUE)

print("\nEffektgrößen - Vollständiges Modell:")
## [1] "\nEffektgrößen - Vollständiges Modell:"
print("Eta-squared (partiell):")
## [1] "Eta-squared (partiell):"
print(eta_squared_full)
## # Effect Size for ANOVA (Type I)
## 
## Parameter                          | Eta2 (partial) |       95% CI
## ------------------------------------------------------------------
## communication_condition            |           0.16 | [0.12, 1.00]
## difficulty                         |           0.14 | [0.11, 1.00]
## communication_condition:difficulty |           0.01 | [0.00, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
print("Omega-squared (partiell):")
## [1] "Omega-squared (partiell):"
print(omega_squared_full)
## # Effect Size for ANOVA (Type I)
## 
## Parameter                          | Omega2 (partial) |       95% CI
## --------------------------------------------------------------------
## communication_condition            |             0.15 | [0.12, 1.00]
## difficulty                         |             0.14 | [0.11, 1.00]
## communication_condition:difficulty |         5.55e-03 | [0.00, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
# ================================================================================
# STEP 4: ANCOVA - WITH FLOW PRONENESS
# ================================================================================

print("\n--- ANCOVA: COMMUNICATION × DIFFICULTY + FLOW PRONENESS ---")
## [1] "\n--- ANCOVA: COMMUNICATION × DIFFICULTY + FLOW PRONENESS ---"
# ANCOVA with flow proneness (fp_total) as covariate. Note: with Type I
# (sequential) sums of squares the covariate is adjusted for the factors
# entered before it, per the formula order.
ancova_model <- aov(flow_score ~ communication_condition * difficulty + fp_total, 
                   data = comprehensive_data)
ancova_summary <- summary(ancova_model)
print("ANCOVA mit Flow Proneness:")
## [1] "ANCOVA mit Flow Proneness:"
print(ancova_summary)
##                                      Df Sum Sq Mean Sq F value   Pr(>F)    
## communication_condition               3  154.8   51.59  67.354  < 2e-16 ***
## difficulty                            3  139.0   46.34  60.499  < 2e-16 ***
## fp_total                              1   47.8   47.84  62.456 7.08e-15 ***
## communication_condition:difficulty    9   12.1    1.34   1.749   0.0741 .  
## Residuals                          1013  775.9    0.77                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Type III ANCOVA (order-independent tests; same contrasts caveat as above)
ancova_type3 <- Anova(ancova_model, type = "III")
print("\nTyp III ANCOVA:")
## [1] "\nTyp III ANCOVA:"
print(ancova_type3)
## Anova Table (Type III tests)
## 
## Response: flow_score
##                                    Sum Sq   Df F value    Pr(>F)    
## (Intercept)                         35.48    1 46.3260 1.712e-11 ***
## communication_condition             56.90    3 24.7617 1.825e-15 ***
## difficulty                          47.75    3 20.7803 4.534e-13 ***
## fp_total                            47.91    1 62.5501 6.766e-15 ***
## communication_condition:difficulty  12.05    9  1.7487   0.07407 .  
## Residuals                          775.89 1013                      
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Effect sizes for the ANCOVA model
eta_squared_ancova <- eta_squared(ancova_model, partial = TRUE)
print("\nEffektgrößen ANCOVA:")
## [1] "\nEffektgrößen ANCOVA:"
print(eta_squared_ancova)
## # Effect Size for ANOVA (Type I)
## 
## Parameter                          | Eta2 (partial) |       95% CI
## ------------------------------------------------------------------
## communication_condition            |           0.17 | [0.13, 1.00]
## difficulty                         |           0.15 | [0.12, 1.00]
## fp_total                           |           0.06 | [0.04, 1.00]
## communication_condition:difficulty |           0.02 | [0.00, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
# ================================================================================
# STEP 5: POST-HOC TESTS
# ================================================================================

print("\n--- POST-HOC TESTS ---")
## [1] "\n--- POST-HOC TESTS ---"
# Tukey HSD for the communication condition main effect
print("\n1. TUKEY HSD - COMMUNICATION CONDITION")
## [1] "\n1. TUKEY HSD - COMMUNICATION CONDITION"
tukey_comm <- TukeyHSD(anova_comm)
print("Tukey HSD Ergebnisse:")
## [1] "Tukey HSD Ergebnisse:"
print(tukey_comm)
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = flow_score ~ communication_condition, data = comprehensive_data)
## 
## $communication_condition
##                                    diff         lwr       upr     p adj
## Together_None-Alone          -0.1251477 -0.36476249 0.1144672 0.5350530
## Together_Chat-Alone           0.5277327  0.26408920 0.7913761 0.0000019
## Together_Video-Alone          0.7980811  0.53308764 1.0630746 0.0000000
## Together_Chat-Together_None   0.6528803  0.44816526 0.8575954 0.0000000
## Together_Video-Together_None  0.9232288  0.71677800 1.1296796 0.0000000
## Together_Video-Together_Chat  0.2703485  0.03643706 0.5042598 0.0158891
# Tukey HSD for the full model, but only if the interaction is significant
if(anova_full_summary[[1]][3, 5] < 0.05) {  # interaction p-value (row 3 of the Type I table)
  print("\n2. TUKEY HSD - COMMUNICATION × DIFFICULTY (da Interaktion signifikant)")
  tukey_full <- TukeyHSD(anova_full, which = "communication_condition:difficulty")
  print(tukey_full)
} else {
  print("\n2. Interaktion nicht signifikant - keine Post-hoc Tests für Interaktion nötig")
}
## [1] "\n2. Interaktion nicht signifikant - keine Post-hoc Tests für Interaktion nötig"
# Bonferroni-corrected pairwise t-tests as a (more conservative) alternative
print("\n3. BONFERRONI-KORRIGIERTE PAARWEISE T-TESTS")
## [1] "\n3. BONFERRONI-KORRIGIERTE PAARWEISE T-TESTS"
pairwise_t <- pairwise.t.test(comprehensive_data$flow_score, 
                             comprehensive_data$communication_condition, 
                             p.adjust.method = "bonferroni")
print("Bonferroni-korrigierte paarweise t-Tests:")
## [1] "Bonferroni-korrigierte paarweise t-Tests:"
print(pairwise_t)
## 
##  Pairwise comparisons using t tests with pooled SD 
## 
## data:  comprehensive_data$flow_score and comprehensive_data$communication_condition 
## 
##                Alone   Together_None Together_Chat
## Together_None  1.000   -             -            
## Together_Chat  1.9e-06 4.1e-15       -            
## Together_Video 1.3e-13 < 2e-16       0.018        
## 
## P value adjustment method: bonferroni
# ================================================================================
# STEP 6: ROBUSTNESS ANALYSES
# ================================================================================

print("\n--- ROBUSTHEITSANALYSEN ---")
## [1] "\n--- ROBUSTHEITSANALYSEN ---"
# Welch ANOVA (does not assume equal variances; relevant because the Levene
# test above was significant)
print("\n1. WELCH-ANOVA (robust gegen ungleiche Varianzen)")
## [1] "\n1. WELCH-ANOVA (robust gegen ungleiche Varianzen)"
welch_anova <- oneway.test(flow_score ~ communication_condition, 
                          data = comprehensive_data, 
                          var.equal = FALSE)
print(welch_anova)
## 
##  One-way analysis of means (not assuming equal variances)
## 
## data:  flow_score and communication_condition
## F = 55.752, num df = 3.00, denom df = 432.71, p-value < 2.2e-16
# Kruskal-Wallis test (non-parametric alternative; relevant because the
# normality tests above were significant)
print("\n2. KRUSKAL-WALLIS TEST (nicht-parametrisch)")
## [1] "\n2. KRUSKAL-WALLIS TEST (nicht-parametrisch)"
kruskal_test <- kruskal.test(flow_score ~ communication_condition, 
                           data = comprehensive_data)
print(kruskal_test)
## 
##  Kruskal-Wallis rank sum test
## 
## data:  flow_score by communication_condition
## Kruskal-Wallis chi-squared = 137.24, df = 3, p-value < 2.2e-16
# If Kruskal-Wallis is significant: post-hoc comparisons
if(kruskal_test$p.value < 0.05) {
  print("\n3. DUNN-TEST (Post-hoc für Kruskal-Wallis)")
  # Simplified stand-in for a Dunn test: pairwise Wilcoxon tests with
  # Bonferroni correction
  dunn_alternative <- pairwise.wilcox.test(comprehensive_data$flow_score, 
                                         comprehensive_data$communication_condition,
                                         p.adjust.method = "bonferroni")
  print("Paarweise Wilcoxon-Tests (Bonferroni-korrigiert):")
  print(dunn_alternative)
}
## [1] "\n3. DUNN-TEST (Post-hoc für Kruskal-Wallis)"
## [1] "Paarweise Wilcoxon-Tests (Bonferroni-korrigiert):"
## 
##  Pairwise comparisons using Wilcoxon rank sum test with continuity correction 
## 
## data:  comprehensive_data$flow_score and comprehensive_data$communication_condition 
## 
##                Alone   Together_None Together_Chat
## Together_None  0.5197  -             -            
## Together_Chat  0.0001  2.1e-13       -            
## Together_Video 9.2e-11 < 2e-16       0.0196       
## 
## P value adjustment method: bonferroni
# ================================================================================
# STEP 7: MODEL COMPARISON AND SUMMARY
# ================================================================================

print("\n--- MODELLVERGLEICH: ANOVA vs MIXED-EFFECTS ---")
## [1] "\n--- MODELLVERGLEICH: ANOVA vs MIXED-EFFECTS ---"
# AIC/BIC comparison between the fixed-effects ANCOVA and the mixed-effects
# model (model_comprehensive_4, fitted earlier in the document).
# NOTE(review): AIC/BIC are only directly comparable if both models were fit
# on the same observations and by ML (not REML) — confirm upstream.
anova_lm <- lm(flow_score ~ communication_condition * difficulty + fp_total, 
               data = comprehensive_data)

print("Modellvergleich (AIC/BIC):")
## [1] "Modellvergleich (AIC/BIC):"
print("Mixed-Effects Modell (aus vorheriger Analyse):")
## [1] "Mixed-Effects Modell (aus vorheriger Analyse):"
print(paste("AIC:", round(AIC(model_comprehensive_4), 2)))
## [1] "AIC: 2542.91"
print(paste("BIC:", round(BIC(model_comprehensive_4), 2)))
## [1] "BIC: 2641.66"
print("Standard lineares Modell (ANCOVA):")
## [1] "Standard lineares Modell (ANCOVA):"
print(paste("AIC:", round(AIC(anova_lm), 2)))
## [1] "AIC: 2667.21"
print(paste("BIC:", round(BIC(anova_lm), 2)))
## [1] "BIC: 2756.08"
# R-squared for the increasingly complex fixed-effects models
r_squared_comm <- summary(lm(flow_score ~ communication_condition, data = comprehensive_data))$r.squared
r_squared_full <- summary(lm(flow_score ~ communication_condition * difficulty, data = comprehensive_data))$r.squared  
r_squared_ancova <- summary(anova_lm)$r.squared

print("\nErklärte Varianz (R²):")
## [1] "\nErklärte Varianz (R²):"
print(paste("Nur Communication Condition:", round(r_squared_comm, 4)))
## [1] "Nur Communication Condition: 0.137"
print(paste("Communication × Difficulty:", round(r_squared_full, 4)))
## [1] "Communication × Difficulty: 0.2707"
print(paste("ANCOVA (+ Flow Proneness):", round(r_squared_ancova, 4)))
## [1] "ANCOVA (+ Flow Proneness): 0.3131"
# ================================================================================
# STEP 8: SUMMARY OF THE ANOVA FINDINGS
# ================================================================================

print("\n=== ZUSAMMENFASSUNG DER ANOVA-BEFUNDE ===")
## [1] "\n=== ZUSAMMENFASSUNG DER ANOVA-BEFUNDE ==="
# Extract the main results of the one-way ANOVA
comm_f_stat <- anova_comm_summary[[1]][1, 4]  # F statistic
comm_p_val <- anova_comm_summary[[1]][1, 5]   # p-value
# BUG FIX: for one-way between-subjects designs effectsize::eta_squared()
# returns a column named "Eta2", not "Eta2_partial" (see its "Returning eta
# squared" message in Step 2). The original `$Eta2_partial` lookup returned
# NULL, so the sprintf() calls below produced `character(0)` instead of the
# summary lines (visible in the stale output kept below). Pick whichever
# column is actually present.
eta_col <- intersect(c("Eta2_partial", "Eta2"), names(eta_squared_comm))[1]
comm_eta_sq <- eta_squared_comm[[eta_col]][1]

print("HAUPTBEFUNDE:")
## [1] "HAUPTBEFUNDE:"
print(sprintf("1. Communication Condition Haupteffekt: F(%d,%d) = %.3f, p = %.4f, η²p = %.3f",
              anova_comm_summary[[1]][1, 1],  # df1 (effect df)
              anova_comm_summary[[1]][2, 1],  # df2 (residual df)
              comm_f_stat, comm_p_val, comm_eta_sq))
## character(0)
# Classify the p-value into conventional significance bands
if(comm_p_val < 0.001) {
  significance_level <- "hoch signifikant (p < .001)"
} else if(comm_p_val < 0.01) {
  significance_level <- "sehr signifikant (p < .01)"
} else if(comm_p_val < 0.05) {
  significance_level <- "signifikant (p < .05)"
} else {
  significance_level <- "nicht signifikant (p ≥ .05)"
}

# Interpret eta squared against Cohen's benchmarks (.01/.06/.14);
# case_when() works here because comm_eta_sq is a length-1 vector
effect_size_interpretation <- case_when(
  comm_eta_sq < 0.01 ~ "vernachlässigbar",
  comm_eta_sq < 0.06 ~ "klein", 
  comm_eta_sq < 0.14 ~ "mittel",
  TRUE ~ "groß"
)

print(sprintf("   Interpretation: %s mit %s Effekt", significance_level, effect_size_interpretation))
## character(0)
# Summary of the assumption checks from Step 1
assumptions_met <- TRUE
assumption_violations <- character(0)

if(levene_test$`Pr(>F)`[1] < 0.05) {
  assumptions_met <- FALSE
  assumption_violations <- c(assumption_violations, "Varianzhomogenität verletzt")
}

if(ad_test$p.value < 0.05) {
  assumptions_met <- FALSE  
  assumption_violations <- c(assumption_violations, "Normalität verletzt")
}

print("\n2. ANNAHMEN-ÜBERPRÜFUNG:")
## [1] "\n2. ANNAHMEN-ÜBERPRÜFUNG:"
if(assumptions_met) {
  print("   Alle ANOVA-Annahmen erfüllt ✓")
  print("   → Parametrische ANOVA-Ergebnisse sind vertrauenswürdig")
} else {
  print(paste("   Verletzungen:", paste(assumption_violations, collapse = ", ")))
  print("   → Robustheitsanalysen (Welch-ANOVA, Kruskal-Wallis) beachten!")
}
## [1] "   Verletzungen: Varianzhomogenität verletzt, Normalität verletzt"
## [1] "   → Robustheitsanalysen (Welch-ANOVA, Kruskal-Wallis) beachten!"
# Consistency check against the mixed-effects analysis
print("\n3. KONSISTENZ MIT MIXED-EFFECTS MODELL:")
## [1] "\n3. KONSISTENZ MIT MIXED-EFFECTS MODELL:"
print("   (Detaillierte Vergleiche siehe vorherige Emmeans-Analyse)")
## [1] "   (Detaillierte Vergleiche siehe vorherige Emmeans-Analyse)"
print("   → Bei ähnlichen p-Werten: Befunde robust über Analysemethoden hinweg")
## [1] "   → Bei ähnlichen p-Werten: Befunde robust über Analysemethoden hinweg"
print("   → Bei unterschiedlichen Ergebnissen: Mixed-Effects bevorzugen (berücksichtigt Datenstruktur)")
## [1] "   → Bei unterschiedlichen Ergebnissen: Mixed-Effects bevorzugen (berücksichtigt Datenstruktur)"
print("\n=== ENDE DER ANOVA-ANALYSE ===")
## [1] "\n=== ENDE DER ANOVA-ANALYSE ==="

Shared flow calculation via the intraclass correlation coefficient (ICC; univariate and multivariate)

# ================================================================================
# PART 1: UNIVARIATE ICCs (split by task AND communication medium)
# ================================================================================

print("\n--- UNIVARIATE ICCs ---")
## [1] "\n--- UNIVARIATE ICCs ---"
# Split the data into the four task × communication-medium subsets
math_jitsi_data <- dplyr::filter(model_data_math, comm == "Jitsi")
math_chat_data  <- dplyr::filter(model_data_math, comm == "Chat")
hp_jitsi_data   <- dplyr::filter(model_data_hp, comm == "Jitsi")
hp_chat_data    <- dplyr::filter(model_data_hp, comm == "Chat")

# Compute a univariate ICC for one task/medium subset: fit an intercept-only
# mixed model with team_id as the sole random effect and summarise it with
# icc(). Prints sample size and the ICC; returns a list holding the subset
# name, the fitted model, and the ICC object.
compute_univariate_icc_simple <- function(data, name) {
  n_teams <- length(unique(data$team_id))
  cat(sprintf("\nUnivariate ICC (%s):\n", name))
  cat(sprintf("Sample: %d Beobachtungen, %d Teams\n", nrow(data), n_teams))

  # Null model: between-team variance only
  null_model <- lmer(flow_score ~ 1 + (1 | team_id), data = data)
  icc_value <- icc(null_model)
  print(icc_value)

  list(name = name, model = null_model, icc = icc_value)
}

# Compute univariate ICCs for all four task × medium combinations
icc_math_jitsi <- compute_univariate_icc_simple(math_jitsi_data, "Math-Jitsi")
## 
## Univariate ICC (Math-Jitsi):
## Sample: 227 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.088
##   Unadjusted ICC: 0.088
icc_math_chat <- compute_univariate_icc_simple(math_chat_data, "Math-Chat")
## 
## Univariate ICC (Math-Chat):
## Sample: 233 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.136
##   Unadjusted ICC: 0.136
icc_hp_jitsi <- compute_univariate_icc_simple(hp_jitsi_data, "HP-Jitsi")
## 
## Univariate ICC (HP-Jitsi):
## Sample: 174 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.145
##   Unadjusted ICC: 0.145
icc_hp_chat <- compute_univariate_icc_simple(hp_chat_data, "HP-Chat")
## 
## Univariate ICC (HP-Chat):
## Sample: 171 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.334
##   Unadjusted ICC: 0.334
# ================================================================================
# PART 2: MULTIVARIATE ICCs by difficulty
# ================================================================================

print("\n--- MULTIVARIATE ICCs nach Difficulty separiert---")
## [1] "\n--- MULTIVARIATE ICCs nach Difficulty separiert---"
# Compute a separate univariate ICC (team_id random intercept) for every
# difficulty level within the given subset. Prints per-level sample sizes and
# ICCs; returns a named list (one entry per level, NULL if a level lacked
# enough data — i.e. no observations or fewer than two teams).
compute_multivariate_icc_simple <- function(data, base_name) {
  cat(sprintf("\nMultivariate ICCs für %s (Separate ICC pro Difficulty):\n", base_name))

  levels_present <- unique(data$difficulty)
  cat(sprintf("Difficulty Levels: %s\n", paste(levels_present, collapse = ", ")))

  results <- list()
  for (level in levels_present) {
    level_data <- dplyr::filter(data, difficulty == level)
    n_obs <- nrow(level_data)
    n_teams <- length(unique(level_data$team_id))

    # Guard clause: skip levels without data or with a single team
    # (a team random effect needs at least two teams)
    if (n_obs == 0 || n_teams <= 1) {
      cat(sprintf("\n%s - %s: Nicht genügend Daten\n", base_name, level))
      results[[level]] <- NULL
      next
    }

    cat(sprintf("\n%s - %s:\n", base_name, level))
    cat(sprintf("Sample: %d Beobachtungen, %d Teams\n", n_obs, n_teams))

    null_model <- lmer(flow_score ~ 1 + (1 | team_id), data = level_data)
    level_icc <- icc(null_model)
    print(level_icc)

    results[[level]] <- list(
      difficulty = level,
      model = null_model,
      icc = level_icc,
      n_obs = n_obs,
      n_teams = n_teams
    )
  }

  results
}

# ================================================================================
# PART 2b: TRUE MULTIVARIATE ICC
# ================================================================================

print("\n--- ECHTER MULTIVARIATE ICC ---")
## [1] "\n--- ECHTER MULTIVARIATE ICC ---"
# True multivariate ICC via multilevel::mult.icc, computed per difficulty
# level and reshaped to one ICC1 column per level. Returns a list with the
# subset name, the wide result table, and the filtered input data; returns
# NULL if mult.icc fails (handled by tryCatch).
compute_true_multivariate_icc <- function(data, base_name) {
  cat(sprintf("\nEchter Multivariate ICC für %s:\n", base_name))
  
  # Report available data before any filtering
  cat(sprintf("Gesamt Sample: %d Beobachtungen, %d Teams\n", 
              nrow(data), length(unique(data$team_id))))
  
  difficulties <- unique(data$difficulty)
  cat(sprintf("Difficulty Levels: %s\n", paste(difficulties, collapse = ", ")))
  
  # Prepare data in the column layout mult.icc expects
  icc_data <- data %>%
    dplyr::select(flow_score, difficulty, team_id) %>%
    # Explicit dplyr::rename to avoid masking by other attached packages
    dplyr::rename(val = flow_score, Condition = difficulty, SessionID = team_id) %>%
    filter(!is.na(val), !is.na(Condition), !is.na(SessionID))
  
  cat(sprintf("Nach Filterung: %d gültige Beobachtungen\n", nrow(icc_data)))
  
  tryCatch({
    # Multivariate ICC per Condition; mult.icc needs a data.frame of
    # variables plus a grouping vector, hence the dummy column
    multivariate_result <- icc_data %>%
      group_by(Condition) %>%
      mutate(dummy = 1:n()) %>%  # dummy variable so mult.icc gets >= 2 columns
      # Require enough data per Condition (>= 3 rows, >= 2 teams)
      filter(n() >= 3, length(unique(SessionID)) >= 2) %>%
      do(multilevel::mult.icc(x = as.data.frame(.[, c("val", "dummy")]), .$SessionID)) %>%
      ungroup() %>%
      dplyr::select(-ICC2) %>%  # keep only ICC1
      spread(Condition, ICC1) %>%  # long -> wide: one column per Condition
      filter(Variable != "dummy")  # drop the dummy row
    
    # Only print when there is something to show
    if (!is.null(multivariate_result) && nrow(multivariate_result) > 0) {
      cat("Multivariate ICC Ergebnisse (ICC1 nach Difficulty):\n")
      print(multivariate_result)
    } else {
      cat("Keine gültigen Multivariate ICC Ergebnisse\n")
    }
    
    return(list(
      name = base_name,
      result = multivariate_result,
      raw_data = icc_data
    ))
    
  }, error = function(e) {
    cat(sprintf("Fehler bei Multivariate ICC Berechnung: %s\n", e$message))
    cat("Mögliche Gründe: Zu wenig Daten pro Condition oder zu wenig Teams\n")
    return(NULL)
  })
}

# Compute separate univariate ICCs per difficulty level
cat("\n=== SEPARATE ICCs PRO DIFFICULTY LEVEL ===")
## 
## === SEPARATE ICCs PRO DIFFICULTY LEVEL ===
# compute_multivariate_icc_simple is defined earlier in the file
multivariate_math_jitsi <- compute_multivariate_icc_simple(math_jitsi_data, "Math-Jitsi")
## 
## Multivariate ICCs für Math-Jitsi (Separate ICC pro Difficulty):
## Difficulty Levels: Optimal_Selected, Hard, Optimal_Calibrated, Easy
## 
## Math-Jitsi - Optimal_Selected:
## Sample: 59 Beobachtungen, 20 Teams
## boundary (singular) fit: see help('isSingular')
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## [1] NA
## 
## Math-Jitsi - Hard:
## Sample: 53 Beobachtungen, 20 Teams
## boundary (singular) fit: see help('isSingular')
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## [1] NA
## 
## Math-Jitsi - Optimal_Calibrated:
## Sample: 59 Beobachtungen, 20 Teams
## boundary (singular) fit: see help('isSingular')
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## [1] NA
## 
## Math-Jitsi - Easy:
## Sample: 56 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.186
##   Unadjusted ICC: 0.186
# Separate per-difficulty ICCs for the Math-Chat cell
multivariate_math_chat <- compute_multivariate_icc_simple(math_chat_data, "Math-Chat")
## 
## Multivariate ICCs für Math-Chat (Separate ICC pro Difficulty):
## Difficulty Levels: Optimal_Selected, Hard, Optimal_Calibrated, Easy
## 
## Math-Chat - Optimal_Selected:
## Sample: 60 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.003
##   Unadjusted ICC: 0.003
## 
## Math-Chat - Hard:
## Sample: 53 Beobachtungen, 20 Teams
## boundary (singular) fit: see help('isSingular')
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## [1] NA
## 
## Math-Chat - Optimal_Calibrated:
## Sample: 60 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.171
##   Unadjusted ICC: 0.171
## 
## Math-Chat - Easy:
## Sample: 60 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.110
##   Unadjusted ICC: 0.110
# Separate per-difficulty ICCs for the HP-Jitsi cell
multivariate_hp_jitsi <- compute_multivariate_icc_simple(hp_jitsi_data, "HP-Jitsi")
## 
## Multivariate ICCs für HP-Jitsi (Separate ICC pro Difficulty):
## Difficulty Levels: Easy, Medium, Hard
## 
## HP-Jitsi - Easy:
## Sample: 60 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.028
##   Unadjusted ICC: 0.028
## 
## HP-Jitsi - Medium:
## Sample: 58 Beobachtungen, 20 Teams
## boundary (singular) fit: see help('isSingular')
## Warning: Can't compute random effect variances. Some variance components equal
##   zero. Your model may suffer from singularity (see `?lme4::isSingular`
##   and `?performance::check_singularity`).
##   Decrease the `tolerance` level to force the calculation of random effect
##   variances, or impose priors on your random effects parameters (using
##   packages like `brms` or `glmmTMB`).
## [1] NA
## 
## HP-Jitsi - Hard:
## Sample: 56 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.071
##   Unadjusted ICC: 0.071
# Separate per-difficulty ICCs for the HP-Chat cell
multivariate_hp_chat <- compute_multivariate_icc_simple(hp_chat_data, "HP-Chat")
## 
## Multivariate ICCs für HP-Chat (Separate ICC pro Difficulty):
## Difficulty Levels: Easy, Medium, Hard
## 
## HP-Chat - Easy:
## Sample: 58 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.347
##   Unadjusted ICC: 0.347
## 
## HP-Chat - Medium:
## Sample: 57 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.152
##   Unadjusted ICC: 0.152
## 
## HP-Chat - Hard:
## Sample: 56 Beobachtungen, 20 Teams
## # Intraclass Correlation Coefficient
## 
##     Adjusted ICC: 0.179
##   Unadjusted ICC: 0.179
# Compute the true multivariate ICCs (note: all four calls below errored in
# this rendering — see the echoed dplyr data-masking error messages)
cat("\n=== ECHTER MULTIVARIATE ICC ===")
## 
## === ECHTER MULTIVARIATE ICC ===
true_multi_math_jitsi <- compute_true_multivariate_icc(math_jitsi_data, "Math-Jitsi")
## 
## Echter Multivariate ICC für Math-Jitsi:
## Gesamt Sample: 227 Beobachtungen, 20 Teams
## Difficulty Levels: Optimal_Selected, Hard, Optimal_Calibrated, Easy
## Nach Filterung: 227 gültige Beobachtungen
## Fehler bei Multivariate ICC Berechnung: Must only be used inside data-masking verbs like `mutate()`, `filter()`, and `group_by()`.
## Mögliche Gründe: Zu wenig Daten pro Condition oder zu wenig Teams
# True multivariate ICC for the Math-Chat cell
true_multi_math_chat <- compute_true_multivariate_icc(math_chat_data, "Math-Chat")
## 
## Echter Multivariate ICC für Math-Chat:
## Gesamt Sample: 233 Beobachtungen, 20 Teams
## Difficulty Levels: Optimal_Selected, Hard, Optimal_Calibrated, Easy
## Nach Filterung: 233 gültige Beobachtungen
## Fehler bei Multivariate ICC Berechnung: Must only be used inside data-masking verbs like `mutate()`, `filter()`, and `group_by()`.
## Mögliche Gründe: Zu wenig Daten pro Condition oder zu wenig Teams
# True multivariate ICC for the HP-Jitsi cell
true_multi_hp_jitsi <- compute_true_multivariate_icc(hp_jitsi_data, "HP-Jitsi")
## 
## Echter Multivariate ICC für HP-Jitsi:
## Gesamt Sample: 174 Beobachtungen, 20 Teams
## Difficulty Levels: Easy, Medium, Hard
## Nach Filterung: 174 gültige Beobachtungen
## Fehler bei Multivariate ICC Berechnung: Must only be used inside data-masking verbs like `mutate()`, `filter()`, and `group_by()`.
## Mögliche Gründe: Zu wenig Daten pro Condition oder zu wenig Teams
# True multivariate ICC for the HP-Chat cell
true_multi_hp_chat <- compute_true_multivariate_icc(hp_chat_data, "HP-Chat")
## 
## Echter Multivariate ICC für HP-Chat:
## Gesamt Sample: 171 Beobachtungen, 20 Teams
## Difficulty Levels: Easy, Medium, Hard
## Nach Filterung: 171 gültige Beobachtungen
## Fehler bei Multivariate ICC Berechnung: Must only be used inside data-masking verbs like `mutate()`, `filter()`, and `group_by()`.
## Mögliche Gründe: Zu wenig Daten pro Condition oder zu wenig Teams
# ================================================================================
# PART 3: SUMMARY AND COMPARISONS
# ================================================================================

print("\n\n=== ZUSAMMENFASSUNG UND VERGLEICHE ===")
## [1] "\n\n=== ZUSAMMENFASSUNG UND VERGLEICHE ==="
# Pull a single ICC estimate out of the heterogeneous objects that different
# icc() implementations return. Checks known field names first, then falls
# back to the first element, and returns NA when nothing usable is found.
extract_icc_value <- function(icc_result) {
  # Plain numeric vector: take its first element
  if (is.numeric(icc_result)) {
    return(icc_result[1])
  }
  if (is.list(icc_result)) {
    # Preferred fields, in priority order
    for (field in c("ICC_adjusted", "ICC_conditional")) {
      if (field %in% names(icc_result)) {
        return(icc_result[[field]])
      }
    }
    # Unknown structure: fall back to the first element
    if (length(icc_result) > 0) {
      return(icc_result[[1]])
    }
  }
  NA
}

# Comparison table of the univariate ICC estimates across the four
# task-by-communication cells, together with per-cell sample sizes.
# local() keeps the intermediate vectors out of the global environment.
univariate_comparison <- local({
  cell_labels <- c("Math-Jitsi", "Math-Chat", "HP-Jitsi", "HP-Chat")
  cell_iccs <- c(
    extract_icc_value(icc_math_jitsi$icc),
    extract_icc_value(icc_math_chat$icc),
    extract_icc_value(icc_hp_jitsi$icc),
    extract_icc_value(icc_hp_chat$icc)
  )
  cell_obs <- c(
    nrow(math_jitsi_data),
    nrow(math_chat_data),
    nrow(hp_jitsi_data),
    nrow(hp_chat_data)
  )
  cell_teams <- c(
    length(unique(math_jitsi_data$team_id)),
    length(unique(math_chat_data$team_id)),
    length(unique(hp_jitsi_data$team_id)),
    length(unique(hp_chat_data$team_id))
  )
  data.frame(
    Task_Communication = cell_labels,
    ICC_Value = cell_iccs,
    N_Observations = cell_obs,
    N_Teams = cell_teams
  )
})

# Display the comparison table
cat("\nUnivariate ICC Vergleich:\n")
## 
## Univariate ICC Vergleich:
print(univariate_comparison)
##   Task_Communication  ICC_Value N_Observations N_Teams
## 1         Math-Jitsi 0.08781621            227      20
## 2          Math-Chat 0.13580795            233      20
## 3           HP-Jitsi 0.14511088            174      20
## 4            HP-Chat 0.33373405            171      20
# Analyze differences between communication media
cat("\nVergleich zwischen Kommunikationsmedien:\n")
## 
## Vergleich zwischen Kommunikationsmedien:
# Math task comparison: Jitsi vs. Chat ICC difference
math_jitsi_value <- univariate_comparison$ICC_Value[univariate_comparison$Task_Communication == "Math-Jitsi"]
math_chat_value <- univariate_comparison$ICC_Value[univariate_comparison$Task_Communication == "Math-Chat"]

if (!is.na(math_jitsi_value) && !is.na(math_chat_value)) {
  cat(sprintf("Math Task: Jitsi (%.4f) vs Chat (%.4f) - Differenz: %.4f\n", 
              math_jitsi_value, math_chat_value, math_jitsi_value - math_chat_value))
}
## Math Task: Jitsi (0.0878) vs Chat (0.1358) - Differenz: -0.0480
# HP task comparison: Jitsi vs. Chat ICC difference
hp_jitsi_value <- univariate_comparison$ICC_Value[univariate_comparison$Task_Communication == "HP-Jitsi"]
hp_chat_value <- univariate_comparison$ICC_Value[univariate_comparison$Task_Communication == "HP-Chat"]

if (!is.na(hp_jitsi_value) && !is.na(hp_chat_value)) {
  cat(sprintf("HP Task: Jitsi (%.4f) vs Chat (%.4f) - Differenz: %.4f\n", 
              hp_jitsi_value, hp_chat_value, hp_jitsi_value - hp_chat_value))
}
## HP Task: Jitsi (0.1451) vs Chat (0.3337) - Differenz: -0.1886
# Print a compact per-difficulty ICC overview for one task-communication cell.
# `results` is a named list (one entry per difficulty level) as produced by
# compute_multivariate_icc_simple(); entries with no valid ICC are skipped.
create_multivariate_summary <- function(results, name) {
  cat(sprintf("\n%s - Separate ICCs nach Difficulty:\n", name))
  
  for (level in names(results)) {
    entry <- results[[level]]
    # Guard clauses: skip missing entries and non-computable ICCs
    if (is.null(entry)) next
    icc_val <- extract_icc_value(entry$icc)
    if (is.na(icc_val)) next
    cat(sprintf("  %s: ICC = %.4f (%d obs, %d teams)\n", 
                level, icc_val, entry$n_obs, entry$n_teams))
  }
}

# Print the per-difficulty summaries for all four cells
create_multivariate_summary(multivariate_math_jitsi, "Math-Jitsi")
## 
## Math-Jitsi - Separate ICCs nach Difficulty:
##   Easy: ICC = 0.1864 (56 obs, 20 teams)
create_multivariate_summary(multivariate_math_chat, "Math-Chat")
## 
## Math-Chat - Separate ICCs nach Difficulty:
##   Optimal_Selected: ICC = 0.0025 (60 obs, 20 teams)
##   Optimal_Calibrated: ICC = 0.1713 (60 obs, 20 teams)
##   Easy: ICC = 0.1101 (60 obs, 20 teams)
create_multivariate_summary(multivariate_hp_jitsi, "HP-Jitsi")
## 
## HP-Jitsi - Separate ICCs nach Difficulty:
##   Easy: ICC = 0.0284 (60 obs, 20 teams)
##   Hard: ICC = 0.0712 (56 obs, 20 teams)
create_multivariate_summary(multivariate_hp_chat, "HP-Chat")
## 
## HP-Chat - Separate ICCs nach Difficulty:
##   Easy: ICC = 0.3473 (58 obs, 20 teams)
##   Medium: ICC = 0.1518 (57 obs, 20 teams)
##   Hard: ICC = 0.1786 (56 obs, 20 teams)
cat("\n=== ECHTE MULTIVARIATE ICC ERGEBNISSE ===\n")
## 
## === ECHTE MULTIVARIATE ICC ERGEBNISSE ===
# Print the ICC1 values stored in a compute_true_multivariate_icc() result.
# `result` is either NULL (computation failed) or a list with a data frame
# in $result; numeric columns are interpreted as per-difficulty ICC1 values.
show_true_multivariate <- function(result, name) {
  cat(sprintf("\n%s - Echter Multivariate ICC:\n", name))
  
  # Early exits for failed or empty computations
  if (is.null(result) || is.null(result$result)) {
    cat("Multivariate ICC konnte nicht berechnet werden\n")
    return(invisible(NULL))
  }
  result_table <- result$result
  if (nrow(result_table) == 0) {
    cat("Keine gültigen Ergebnisse verfügbar\n")
    return(invisible(NULL))
  }
  
  # One ICC1 value per numeric column; non-NA values only
  is_num_col <- vapply(result_table, is.numeric, logical(1))
  if (any(is_num_col)) {
    cat("ICC1-Werte nach Difficulty:\n")
    for (col_name in names(result_table)[is_num_col]) {
      icc1 <- result_table[[col_name]][1]
      if (!is.na(icc1)) {
        cat(sprintf("  %s: ICC1 = %.4f\n", col_name, icc1))
      }
    }
  }
}

# Show per-condition ICC1 tables where the computation succeeded
if (!is.null(true_multi_math_jitsi)) show_true_multivariate(true_multi_math_jitsi, "Math-Jitsi")
if (!is.null(true_multi_math_chat)) show_true_multivariate(true_multi_math_chat, "Math-Chat")
if (!is.null(true_multi_hp_jitsi)) show_true_multivariate(true_multi_hp_jitsi, "HP-Jitsi")
if (!is.null(true_multi_hp_chat)) show_true_multivariate(true_multi_hp_chat, "HP-Chat")

Correlation of flow with anticipated mediators

# Master Thesis Analysis: Flow Mediation Analysis
# Separate analyses for the Math and Hidden Profile tasks

library(dplyr)
library(tidyr)
library(lme4)
library(lmerTest)
library(rmcorr)
library(ggplot2)
library(psych)

# Pin the dplyr verbs explicitly so masking by other packages cannot redirect
# them (original note mentioned a plyr/dplyr conflict)
summarise <- dplyr::summarise
mutate <- dplyr::mutate
select <- dplyr::select

# ================================================================================
# TEIL 1: DATENAUFBEREITUNG - NEUE MEDIATOREN
# ================================================================================

# Funktion zur Extraktion rundenweiser Mediatoren
# Pull one round-wise item (column suffix matched by `var_pattern`) out of
# the wide export and return it in long format with harmonized round numbers
# (Math rounds 3-6 become 1-4, HP rounds 1-3 stay 1-3).
extract_round_based_mediators <- function(data, var_pattern, var_name) {
  # Regex matching the per-round player columns of all four app variants
  column_regex <- paste0(
    "(mathJitsi|mathChat|HiddenProfile_Jitsi|HiddenProfile_Chat).*\\.",
    var_pattern, "$"
  )
  
  data %>%
    select(participant.code, contains("player")) %>%
    select(participant.code, matches(column_regex)) %>%
    # Wide -> long: one row per participant x source column
    pivot_longer(cols = -participant.code,
                 names_to = "variable",
                 values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(
      # Derive task and communication medium from the column name
      task = case_when(
        grepl("^math", variable) ~ "Math",
        grepl("^HiddenProfile", variable) ~ "HP"
      ),
      comm = case_when(
        grepl("Jitsi", variable) ~ "Jitsi",
        grepl("Chat", variable) ~ "Chat"
      ),
      round_raw = as.numeric(gsub(".*\\.(\\d+)\\.player.*", "\\1", variable)),
      # Harmonize round numbering across tasks
      round = case_when(
        task == "Math" ~ round_raw - 2,
        task == "HP" ~ round_raw
      ),
      mediator = var_name
    ) %>%
    # Keep only the rounds actually analyzed for each task
    filter((task == "Math" & round_raw >= 3 & round_raw <= 6) |
             (task == "HP" & round_raw >= 1 & round_raw <= 3)) %>%
    select(participant.code, task, comm, round, mediator, value)
}

# Funktion zur Extraktion von Mediatoren mit mehreren Items
# Extract a multi-item mediator scale from the wide export and average across
# its items.
#
# Args:
#   data:        wide participant-level data frame
#   items:       character vector of item column suffixes (e.g. c("tm1", "tm2"))
#   var_name:    name assigned to the aggregated mediator
#   round_based: TRUE  -> items were measured every round; long output with
#                         columns participant.code, task, comm, round, mediator, value
#                FALSE -> items were measured only in the final round of each
#                         task (Math: round 6, HP: round 3); output aggregated
#                         per participant.code, task and comm
extract_multi_item_mediators <- function(data, items, var_name, round_based = TRUE) {
  if(round_based) {
    # Round-wise mediators: collect each item's long data in a list, then bind
    # once (replaces the quadratic grow-with-bind_rows-in-a-loop pattern)
    item_frames <- lapply(items, function(item) {
      extract_round_based_mediators(data, item, paste0(var_name, "_", item))
    })
    all_data <- bind_rows(item_frames)
    
    # Average over items within participant x task x comm x round
    aggregated <- all_data %>%
      mutate(base_mediator = var_name) %>%
      group_by(participant.code, task, comm, round, base_mediator) %>%
      summarise(value = mean(value, na.rm = TRUE), .groups = "drop") %>%
      dplyr::rename(mediator = base_mediator)
    
    return(aggregated)
  } else {
    # One-shot mediators: only the final round of each app is relevant
    med_data <- data %>%
      select(participant.code, contains("player"))
    
    # Build one regex per item and final-round app column
    # (Math: round 6, HP: round 3)
    patterns <- unlist(lapply(items, function(item) {
      c(paste0("mathJitsi\\.6\\.player\\.", item, "$"),
        paste0("mathChat\\.6\\.player\\.", item, "$"),
        paste0("HiddenProfile_Jitsi\\.3\\.player\\.", item, "$"),
        paste0("HiddenProfile_Chat\\.3\\.player\\.", item, "$"))
    }))
    combined_pattern <- paste(patterns, collapse = "|")
    
    # Keep only the matching columns
    med_data <- med_data %>%
      select(participant.code, matches(combined_pattern))
    
    # Wide -> long, deriving task/comm from the column name
    med_long <- med_data %>%
      pivot_longer(cols = -participant.code, 
                   names_to = "variable", 
                   values_to = "value") %>%
      filter(!is.na(value)) %>%
      mutate(
        task = case_when(
          grepl("^math", variable) ~ "Math",
          grepl("^HiddenProfile", variable) ~ "HP"
        ),
        comm = case_when(
          grepl("Jitsi", variable) ~ "Jitsi",
          grepl("Chat", variable) ~ "Chat"
        ),
        item = gsub(".*\\.(\\w+)$", "\\1", variable)
      )
    
    # Average over items within participant x task x comm
    aggregated <- med_long %>%
      group_by(participant.code, task, comm) %>%
      summarise(value = mean(value, na.rm = TRUE), .groups = "drop")
    
    return(aggregated)
  }
}

# ================================================================================
# STRUCTURAL INTEGRATION
# ================================================================================

# Team composition (measured only once per task - in the last round)
tc_aggregated <- extract_multi_item_mediators(data, 
                                              c("tsz1", "tsz2", "tsz3", "td1", "td2", "td3", "tsc1", "tsc2", "tsc3"), 
                                              "team_composition", 
                                              round_based = FALSE)

# ================================================================================
# FUNCTIONAL INTEGRATION
# ================================================================================

# Information sharing (info1, info2) - round-wise
is_long <- extract_multi_item_mediators(data, c("info1", "info2"), "information_sharing", round_based = TRUE)

# Synchronization (ec1) - round-wise
sync_long <- extract_round_based_mediators(data, "ec1", "synchronization")

# ================================================================================
# MOTIVATIONAL INTEGRATION
# ================================================================================

# Stress (is1-is5) - round-wise
stress_long <- extract_multi_item_mediators(data, c("is1", "is2", "is3", "is4", "is5"), "stress", round_based = TRUE)

# Arousal - round-wise
arousal_long <- extract_round_based_mediators(data, "arousal", "arousal")

# Valence (pleasure item) - round-wise
valence_long <- extract_round_based_mediators(data, "pleasure", "valence")

# Individual motivation (tm1-tm3) - round-wise
ind_motiv_long <- extract_multi_item_mediators(data, c("tm1", "tm2", "tm3"), "individual_motivation", round_based = TRUE)

# Team motivation (te1-te3, measured only once per task - last round)
team_motiv_aggregated <- extract_multi_item_mediators(data, c("te1", "te2", "te3"), "team_motivation", round_based = FALSE)

# ================================================================================
# PREPARE FLOW SCORES
# ================================================================================

# Aggregate flow scores per participant x task x communication medium
flow_aggregated <- flow_clean %>%
  group_by(participant.code, task, comm) %>%
  summarise(mean_flow_score = mean(flow_score, na.rm = TRUE), .groups = "drop")

# ================================================================================
# PART 2: REPEATED MEASURES CORRELATIONS
# ================================================================================

# Run a repeated-measures correlation (rmcorr) between one round-wise mediator
# and the round-wise flow score, separately per communication medium.
#
# Args:
#   data:          long mediator data (participant.code, task, comm, round, value)
#   mediator_name: label only; NOTE(review): currently unused inside the function
#   task_filter:   "Math" or "HP"
# Returns: named list (Jitsi/Chat) with r, p, df and CI; an entry is only
#          created when more than 10 merged observations are available.
perform_rmcorr <- function(data, mediator_name, task_filter) {
  # Restrict the mediator data to the requested task
  task_data <- data %>%
    filter(task == task_filter)
  
  # Prepare flow scores with round numbers matching the mediator data.
  # NOTE(review): round numbers are reconstructed via row_number() after
  # arrange(); this relies on flow_clean's rows being in round order within
  # each participant x comm group — confirm upstream ordering.
  if(task_filter == "Math") {
    # Math: keep only the rounds in which round-wise mediators were measured
    flow_round <- flow_clean %>%
      filter(task == task_filter) %>%
      group_by(participant.code, comm) %>%
      arrange(participant.code, comm) %>%
      mutate(round_raw = row_number()) %>%
      # Keep only rounds 3-6 (corresponds to rounds 1-4 after shifting)
      filter(round_raw >= 3) %>%
      mutate(round = round_raw - 2) %>%
      ungroup()
  } else {
    # HP: rounds 1-3
    flow_round <- flow_clean %>%
      filter(task == task_filter) %>%
      group_by(participant.code, comm) %>%
      arrange(participant.code, comm) %>%
      mutate(round = row_number()) %>%
      filter(round <= 3) %>%
      ungroup()
  }
  
  # Join mediator values with the matching flow scores and drop incomplete rows
  merged_data <- task_data %>%
    left_join(flow_round %>% select(participant.code, comm, round, flow_score), 
              by = c("participant.code", "comm", "round")) %>%
    filter(!is.na(flow_score) & !is.na(value))
  
  # Compute rmcorr separately for each communication condition
  results <- list()
  
  for(comm_type in c("Jitsi", "Chat")) {
    comm_data <- merged_data %>% filter(comm == comm_type)
    
    if(nrow(comm_data) > 10) {  # require enough data points
      rmcorr_result <- rmcorr(participant = participant.code, 
                              measure1 = value, 
                              measure2 = flow_score, 
                              dataset = comm_data)
      
      results[[comm_type]] <- list(
        r = rmcorr_result$r,
        p = rmcorr_result$p,
        df = rmcorr_result$df,
        CI = rmcorr_result$CI
      )
    }
  }
  
  return(results)
}

# Run rmcorr for all round-wise mediators
print("=== REPEATED MEASURES CORRELATIONS ===\n")
## [1] "=== REPEATED MEASURES CORRELATIONS ===\n"
# Named list of all round-wise mediators (long-format data frames from above)
round_based_mediators <- list(
  "information_sharing" = is_long,
  "synchronization" = sync_long,
  "stress" = stress_long,
  "arousal" = arousal_long,
  "valence" = valence_long,
  "individual_motivation" = ind_motiv_long
)

# Run for the Math task
print("--- MATH TASK ---")
## [1] "--- MATH TASK ---"
rmcorr_results_math <- list()
for(med_name in names(round_based_mediators)) {
  cat("\n", med_name, ":\n", sep = "")
  result <- perform_rmcorr(round_based_mediators[[med_name]], med_name, "Math")
  rmcorr_results_math[[med_name]] <- result
  
  # Print r, p and 95% CI per communication medium
  for(comm in names(result)) {
    cat("  ", comm, ": r = ", round(result[[comm]]$r, 3), 
        ", p = ", round(result[[comm]]$p, 3), 
        ", 95% CI [", round(result[[comm]]$CI[1], 3), ", ", 
        round(result[[comm]]$CI[2], 3), "]\n", sep = "")
  }
}
## 
## information_sharing:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.007, p = 0.962, 95% CI [-0.269, 0.282]
##   Chat: r = -0.067, p = 0.63, 95% CI [-0.329, 0.204]
## 
## synchronization:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.28, p = 0.046, 95% CI [0.005, 0.516]
##   Chat: r = -0.208, p = 0.13, 95% CI [-0.451, 0.063]
## 
## stress:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = -0.242, p = 0.088, 95% CI [-0.485, 0.036]
##   Chat: r = 0.088, p = 0.527, 95% CI [-0.184, 0.348]
## 
## arousal:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.104, p = 0.467, 95% CI [-0.176, 0.369]
##   Chat: r = 0.26, p = 0.057, 95% CI [-0.008, 0.494]
## 
## valence:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.16, p = 0.263, 95% CI [-0.121, 0.417]
##   Chat: r = -0.075, p = 0.588, 95% CI [-0.336, 0.196]
## 
## individual_motivation:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.483, p = 0, 95% CI [0.239, 0.67]
##   Chat: r = 0.124, p = 0.37, 95% CI [-0.148, 0.38]
# Run for the Hidden Profile task
print("\n--- HIDDEN PROFILE TASK ---")
## [1] "\n--- HIDDEN PROFILE TASK ---"
rmcorr_results_hp <- list()
for(med_name in names(round_based_mediators)) {
  cat("\n", med_name, ":\n", sep = "")
  result <- perform_rmcorr(round_based_mediators[[med_name]], med_name, "HP")
  rmcorr_results_hp[[med_name]] <- result
  
  # Print r, p and 95% CI per communication medium
  for(comm in names(result)) {
    cat("  ", comm, ": r = ", round(result[[comm]]$r, 3), 
        ", p = ", round(result[[comm]]$p, 3), 
        ", 95% CI [", round(result[[comm]]$CI[1], 3), ", ", 
        round(result[[comm]]$CI[2], 3), "]\n", sep = "")
  }
}
## 
## information_sharing:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = -0.02, p = 0.834, 95% CI [-0.202, 0.164]
##   Chat: r = -0.02, p = 0.834, 95% CI [-0.205, 0.166]
## 
## synchronization:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = -0.084, p = 0.374, 95% CI [-0.263, 0.101]
##   Chat: r = -0.041, p = 0.666, 95% CI [-0.225, 0.145]
## 
## stress:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = -0.096, p = 0.309, 95% CI [-0.274, 0.089]
##   Chat: r = 0.012, p = 0.9, 95% CI [-0.174, 0.197]
## 
## arousal:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = -0.023, p = 0.811, 95% CI [-0.205, 0.161]
##   Chat: r = -0.049, p = 0.61, 95% CI [-0.232, 0.138]
## 
## valence:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.114, p = 0.225, 95% CI [-0.071, 0.291]
##   Chat: r = 0.045, p = 0.639, 95% CI [-0.142, 0.228]
## 
## individual_motivation:
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 = value, measure2 =
## flow_score, : 'participant.code' coerced into a factor
##   Jitsi: r = 0.013, p = 0.89, 95% CI [-0.17, 0.196]
##   Chat: r = -0.171, p = 0.072, 95% CI [-0.345, 0.015]
# ================================================================================
# TEIL 3: LINEAR MIXED MODELS
# ================================================================================

# Fit the two regression models of a simple mediation chain for one mediator:
#   Model 1: communication medium -> mediator
#   Model 2: mediator -> flow
# Round-wise mediators use mixed models with crossed random intercepts for
# participant and round; one-shot mediators use plain lm() on aggregates.
#
# Args:
#   mediator_data:  mediator data frame (long for round-based, aggregated otherwise)
#   mediator_name:  label only; NOTE(review): currently unused inside the function
#   task_filter:    "Math" or "HP"
#   is_round_based: whether the mediator was measured every round
# Returns: list(comm_to_mediator = model1, mediator_to_flow = model2)
perform_lmm_analysis <- function(mediator_data, mediator_name, task_filter, is_round_based = TRUE) {
  
  # Restrict to the requested task
  task_data <- mediator_data %>%
    filter(task == task_filter)
  
  if(is_round_based) {
    # Round-wise mediators
    # Model 1: communication medium -> mediator
    model1 <- lmer(value ~ comm + (1|participant.code) + (1|round), 
                   data = task_data)
    
    # Prepare flow scores with matching round numbers.
    # NOTE(review): duplicates the round reconstruction in perform_rmcorr()
    # and relies on flow_clean's row order within participant x comm groups.
    if(task_filter == "Math") {
      flow_round <- flow_clean %>%
        filter(task == task_filter) %>%
        group_by(participant.code, comm) %>%
        arrange(participant.code, comm) %>%
        mutate(round_raw = row_number()) %>%
        filter(round_raw >= 3) %>%
        mutate(round = round_raw - 2) %>%
        ungroup()
    } else {
      flow_round <- flow_clean %>%
        filter(task == task_filter) %>%
        group_by(participant.code, comm) %>%
        arrange(participant.code, comm) %>%
        mutate(round = row_number()) %>%
        filter(round <= 3) %>%
        ungroup()
    }
    
    merged_data <- task_data %>%
      left_join(flow_round %>% select(participant.code, comm, round, flow_score), 
                by = c("participant.code", "comm", "round"))
    
    # Model 2: mediator -> flow
    model2 <- lmer(flow_score ~ value + (1|participant.code) + (1|round), 
                   data = merged_data)
    
  } else {
    # One-shot mediators (rename score column to `value` for consistency)
    if("team_composition_score" %in% names(task_data)) {
      task_data <- task_data %>% rename(value = team_composition_score)
    }
    if("team_motivation_score" %in% names(task_data)) {
      task_data <- task_data %>% rename(value = team_motivation_score)
    }
    
    # Model 1: communication medium -> mediator
    model1 <- lm(value ~ comm, data = task_data)
    
    # Merge with aggregated flow scores
    merged_data <- task_data %>%
      left_join(flow_aggregated, by = c("participant.code", "task", "comm"))
    
    # Model 2: mediator -> flow
    model2 <- lm(mean_flow_score ~ value, data = merged_data)
  }
  
  return(list(
    comm_to_mediator = model1,
    mediator_to_flow = model2
  ))
}

# ================================================================================
# LMM ANALYSES FOR THE MATH TASK
# ================================================================================

print("\n\n=== LINEAR MIXED MODELS - MATH TASK ===\n")
## [1] "\n\n=== LINEAR MIXED MODELS - MATH TASK ===\n"
# Structural integration
print("--- STRUKTURELLE INTEGRATION ---")
## [1] "--- STRUKTURELLE INTEGRATION ---"
print("\nTeam Composition:")
## [1] "\nTeam Composition:"
# Team composition is a one-shot mediator -> plain lm() models
tc_models_math <- perform_lmm_analysis(tc_aggregated, "team_composition", "Math", FALSE)
print("Kommunikation -> Team Composition:")
## [1] "Kommunikation -> Team Composition:"
print(summary(tc_models_math$comm_to_mediator))
## 
## Call:
## lm(formula = value ~ comm, data = task_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.34630 -0.34630 -0.05185  0.32037  2.02037 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  3.64630    0.06774  53.832   <2e-16 ***
## commJitsi   -0.07778    0.09579  -0.812    0.418    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.5247 on 118 degrees of freedom
## Multiple R-squared:  0.005556,   Adjusted R-squared:  -0.002872 
## F-statistic: 0.6593 on 1 and 118 DF,  p-value: 0.4185
print("\nTeam Composition -> Flow:")
## [1] "\nTeam Composition -> Flow:"
print(summary(tc_models_math$mediator_to_flow))
## 
## Call:
## lm(formula = mean_flow_score ~ value, data = merged_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.03003 -0.51321  0.08097  0.60959  1.46048 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   4.8999     0.5064   9.675   <2e-16 ***
## value         0.1382     0.1389   0.994    0.322    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7941 on 118 degrees of freedom
## Multiple R-squared:  0.008312,   Adjusted R-squared:  -9.259e-05 
## F-statistic: 0.989 on 1 and 118 DF,  p-value: 0.322
# Functional integration
cat("\n--- FUNKTIONALE INTEGRATION ---\n")
cat("\nInformation Sharing:\n")
# Round-wise mediator -> by_round = TRUE: LMMs with random intercepts for
# participant.code and round
is_models_math <- perform_lmm_analysis(is_long, "information_sharing", "Math", TRUE)
cat("Kommunikation -> Information Sharing:\n")
print(summary(is_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1496.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.4054 -0.4432  0.1194  0.4559  2.8217 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.977685 1.40630 
##  round            (Intercept) 0.002556 0.05056 
##  Residual                     0.705086 0.83969 
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   4.7542     0.1911 107.5899  24.871  < 2e-16 ***
## commJitsi     0.8125     0.2680 118.0020   3.032  0.00298 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.701
print("\nInformation Sharing -> Flow:")
## [1] "\nInformation Sharing -> Flow:"
print(summary(is_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 524.4
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.67245 -0.43830  0.04385  0.47117  2.57378 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.574782 0.75814 
##  round            (Intercept) 0.006737 0.08208 
##  Residual                     0.251830 0.50183 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.15053    0.21764  72.01104   23.67   <2e-16 ***
## value         0.08223    0.03771 218.85830    2.18   0.0303 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.895
print("\nSynchronization:")
## [1] "\nSynchronization:"
sync_models_math <- perform_lmm_analysis(sync_long, "synchronization", "Math", TRUE)
print("Kommunikation -> Synchronization:")
## [1] "Kommunikation -> Synchronization:"
print(summary(sync_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1733.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.5506 -0.3521  0.2160  0.5734  1.8530 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.900466 0.9489  
##  round            (Intercept) 0.005521 0.0743  
##  Residual                     1.606285 1.2674  
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.2250     0.1519  46.4473   34.39   <2e-16 ***
## commJitsi     0.3875     0.2083 118.0002    1.86   0.0654 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.686
print("\nSynchronization -> Flow:")
## [1] "\nSynchronization -> Flow:"
print(summary(sync_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 524.3
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.42051 -0.46525  0.02193  0.50646  2.24463 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.573207 0.75710 
##  round            (Intercept) 0.007269 0.08526 
##  Residual                     0.251615 0.50161 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.16589    0.20634  56.53969  25.036   <2e-16 ***
## value         0.07481    0.03316 179.35983   2.256   0.0253 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.879
# Motivational integration
cat("\n--- MOTIVATIONALE INTEGRATION ---\n")
cat("\nStress:\n")
stress_models_math <- perform_lmm_analysis(stress_long, "stress", "Math", TRUE)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): singular fit above — the round random-intercept variance is
# estimated at 0 (see summary below); the fixed-effect estimates remain usable.
cat("Kommunikation -> Stress:\n")
print(summary(stress_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1701.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8433 -0.5462 -0.2301  0.5243  2.8028 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.185    1.089   
##  round            (Intercept) 0.000    0.000   
##  Residual                     1.395    1.181   
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   2.8550     0.1599 118.0000  17.855   <2e-16 ***
## commJitsi    -0.3992     0.2261 118.0000  -1.765   0.0801 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.707
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
print("\nStress -> Flow:")
## [1] "\nStress -> Flow:"
print(summary(stress_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 521.9
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.42347 -0.45601  0.00099  0.51017  2.26190 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.560485 0.74866 
##  round            (Intercept) 0.006404 0.08003 
##  Residual                     0.251121 0.50112 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.81284    0.12846  13.47164  45.251 4.12e-16 ***
## value        -0.09201    0.03324 186.00241  -2.768   0.0062 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.668
print("\nArousal:")
## [1] "\nArousal:"
arousal_models_math <- perform_lmm_analysis(arousal_long, "arousal", "Math", TRUE)
print("Kommunikation -> Arousal:")
## [1] "Kommunikation -> Arousal:"
print(summary(arousal_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1987.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3227 -0.5840 -0.1134  0.5473  2.7575 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 2.86849  1.6937  
##  round            (Intercept) 0.05076  0.2253  
##  Residual                     2.33952  1.5295  
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   4.3708     0.2650  37.6767  16.491   <2e-16 ***
## commJitsi     0.2500     0.3393 118.0000   0.737    0.463    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.640
print("\nArousal -> Flow:")
## [1] "\nArousal -> Flow:"
print(summary(arousal_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 528.5
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.45181 -0.47691  0.03409  0.48412  2.39488 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.629855 0.79363 
##  round            (Intercept) 0.003183 0.05642 
##  Residual                     0.242013 0.49195 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.44918    0.14431  41.25386  37.759   <2e-16 ***
## value         0.02739    0.02451 190.20509   1.117    0.265    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.782
print("\nValence:")
## [1] "\nValence:"
valence_models_math <- perform_lmm_analysis(valence_long, "valence", "Math", TRUE)
## boundary (singular) fit: see help('isSingular')
print("Kommunikation -> Valence:")
## [1] "Kommunikation -> Valence:"
print(summary(valence_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 2032.1
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.91446 -0.57676  0.09908  0.62438  2.21176 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.298    1.139   
##  round            (Intercept) 0.000    0.000   
##  Residual                     3.160    1.778   
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.5917     0.1865 118.0002  29.976   <2e-16 ***
## commJitsi     0.4750     0.2638 118.0002   1.801   0.0743 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.707
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
print("\nValence -> Flow:")
## [1] "\nValence -> Flow:"
print(summary(valence_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 526.6
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.41538 -0.49765  0.01257  0.51345  2.29046 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.593869 0.77063 
##  round            (Intercept) 0.006746 0.08214 
##  Residual                     0.248255 0.49825 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.32583    0.16869  32.89348  31.572   <2e-16 ***
## value         0.04266    0.02354 156.35930   1.813   0.0718 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.814
print("\nIndividual Motivation:")
## [1] "\nIndividual Motivation:"
ind_motiv_models_math <- perform_lmm_analysis(ind_motiv_long, "individual_motivation", "Math", TRUE)
print("Kommunikation -> Individual Motivation:")
## [1] "Kommunikation -> Individual Motivation:"
print(summary(ind_motiv_models_math$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1559.5
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.9518 -0.4443  0.1269  0.5332  2.4906 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.00039  1.0002  
##  round            (Intercept) 0.01569  0.1253  
##  Residual                     0.99704  0.9985  
## Number of obs: 480, groups:  participant.code, 120; round, 4
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.1333     0.1573  39.0491  32.630   <2e-16 ***
## commJitsi     0.0625     0.2041 118.0000   0.306     0.76    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.649
print("\nIndividual Motivation -> Flow:")
## [1] "\nIndividual Motivation -> Flow:"
print(summary(ind_motiv_models_math$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 500.7
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.25651 -0.50897  0.02032  0.48819  2.09026 
## 
## Random effects:
##  Groups           Name        Variance  Std.Dev.
##  participant.code (Intercept) 0.5212070 0.72195 
##  round            (Intercept) 0.0005332 0.02309 
##  Residual                     0.2269607 0.47640 
## Number of obs: 221, groups:  participant.code, 118; round, 2
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   4.33591    0.23797 182.73225  18.220  < 2e-16 ***
## value         0.23383    0.04244 196.66218   5.509 1.12e-07 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.948
print("\nTeam Motivation:")
## [1] "\nTeam Motivation:"
tm_models_math <- perform_lmm_analysis(team_motiv_aggregated, "team_motivation", "Math", FALSE)
print("Kommunikation -> Team Motivation:")
## [1] "Kommunikation -> Team Motivation:"
print(summary(tm_models_math$comm_to_mediator))
## 
## Call:
## lm(formula = value ~ comm, data = task_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -3.06111 -0.39444  0.00556  0.33889  2.27222 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.66111    0.10541  44.220   <2e-16 ***
## commJitsi    0.06667    0.14907   0.447    0.656    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8165 on 118 degrees of freedom
## Multiple R-squared:  0.001692,   Adjusted R-squared:  -0.006768 
## F-statistic:   0.2 on 1 and 118 DF,  p-value: 0.6555
print("\nTeam Motivation -> Flow:")
## [1] "\nTeam Motivation -> Flow:"
print(summary(tm_models_math$mediator_to_flow))
## 
## Call:
## lm(formula = mean_flow_score ~ value, data = merged_data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.9235 -0.4783  0.0234  0.7174  1.3850 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.62953    0.42188  10.974   <2e-16 ***
## value        0.16376    0.08856   1.849   0.0669 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.7861 on 118 degrees of freedom
## Multiple R-squared:  0.02816,    Adjusted R-squared:  0.01993 
## F-statistic:  3.42 on 1 and 118 DF,  p-value: 0.06693
# ================================================================================
# LMM ANALYSES FOR HP TASK
# ================================================================================

# Same analysis pipeline as for the Math task, restricted to the Hidden
# Profile task. cat() (not print()) so "\n" produces real newlines.
cat("\n\n=== LINEAR MIXED MODELS - HIDDEN PROFILE TASK ===\n")
# Structural integration
cat("--- STRUKTURELLE INTEGRATION ---\n")
cat("\nTeam Composition:\n")
tc_models_hp <- perform_lmm_analysis(tc_aggregated, "team_composition", "HP", FALSE)
cat("Kommunikation -> Team Composition:\n")
print(summary(tc_models_hp$comm_to_mediator))
## 
## Call:
## lm(formula = value ~ comm, data = task_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -1.35741 -0.24630 -0.02407  0.17407  1.64259 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  3.71481    0.04995  74.369   <2e-16 ***
## commJitsi   -0.02407    0.07064  -0.341    0.734    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.3869 on 118 degrees of freedom
## Multiple R-squared:  0.0009832,  Adjusted R-squared:  -0.007483 
## F-statistic: 0.1161 on 1 and 118 DF,  p-value: 0.7339
print("\nTeam Composition -> Flow:")
## [1] "\nTeam Composition -> Flow:"
print(summary(tc_models_hp$mediator_to_flow))
## 
## Call:
## lm(formula = mean_flow_score ~ value, data = merged_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.28615 -0.64829 -0.01969  0.76092  1.72259 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   4.9045     0.8348   5.875    4e-08 ***
## value         0.1157     0.2243   0.516    0.607    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.943 on 118 degrees of freedom
## Multiple R-squared:  0.002252,   Adjusted R-squared:  -0.006203 
## F-statistic: 0.2664 on 1 and 118 DF,  p-value: 0.6067
# Functional integration
cat("\n--- FUNKTIONALE INTEGRATION ---\n")
cat("\nInformation Sharing:\n")
# Round-wise mediator -> mixed models with participant and round intercepts
is_models_hp <- perform_lmm_analysis(is_long, "information_sharing", "HP", TRUE)
cat("Kommunikation -> Information Sharing:\n")
print(summary(is_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1086
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.2858 -0.3487  0.1965  0.5561  2.1280 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.53894  0.7341  
##  round            (Intercept) 0.07659  0.2768  
##  Residual                     0.81160  0.9009  
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.6083     0.1975   3.9287  28.391 1.08e-05 ***
## commJitsi     0.4722     0.1643 118.0001   2.875   0.0048 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.416
print("\nInformation Sharing -> Flow:")
## [1] "\nInformation Sharing -> Flow:"
print(summary(is_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 874.4
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.77345 -0.51975  0.03478  0.60304  2.39642 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.63858  0.7991  
##  round            (Intercept) 0.01287  0.1134  
##  Residual                     0.39562  0.6290  
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   4.62008    0.26465 146.08168  17.457  < 2e-16 ***
## value         0.12321    0.04179 327.93707   2.948  0.00342 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.920
print("\nSynchronization:")
## [1] "\nSynchronization:"
sync_models_hp <- perform_lmm_analysis(sync_long, "synchronization", "HP", TRUE)
print("Kommunikation -> Synchronization:")
## [1] "Kommunikation -> Synchronization:"
print(summary(sync_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1281.2
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.1034 -0.4119  0.2072  0.6768  1.7780 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.6074   0.7793  
##  round            (Intercept) 0.1760   0.4196  
##  Residual                     1.5545   1.2468  
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.1333     0.2783   3.0174  18.447 0.000336 ***
## commJitsi     0.5778     0.1937 118.0000   2.983 0.003470 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.348
print("\nSynchronization -> Flow:")
## [1] "\nSynchronization -> Flow:"
print(summary(sync_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 878.6
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7197 -0.5437  0.0499  0.5904  2.2257 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.66383  0.8148  
##  round            (Intercept) 0.01247  0.1117  
##  Residual                     0.39528  0.6287  
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   4.97284    0.19672  67.60974  25.278   <2e-16 ***
## value         0.06721    0.03075 289.52303   2.186   0.0296 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.848
# Motivational integration
cat("\n--- MOTIVATIONALE INTEGRATION ---\n")
cat("\nStress:\n")
stress_models_hp <- perform_lmm_analysis(stress_long, "stress", "HP", TRUE)
## boundary (singular) fit: see help('isSingular')
# NOTE(review): singular fit — round variance estimated at 0 (see summary below).
cat("Kommunikation -> Stress:\n")
print(summary(stress_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1199.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.3640 -0.4728 -0.1844  0.4455  3.0372 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.076    1.037   
##  round            (Intercept) 0.000    0.000   
##  Residual                     1.010    1.005   
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   2.6189     0.1534 118.0000  17.067   <2e-16 ***
## commJitsi    -0.3322     0.2170 118.0000  -1.531    0.128    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.707
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
print("\nStress -> Flow:")
## [1] "\nStress -> Flow:"
print(summary(stress_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 871
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.2345 -0.5651  0.0593  0.6416  2.3708 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.624374 0.79017 
##  round            (Intercept) 0.007019 0.08378 
##  Residual                     0.394546 0.62813 
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.6553     0.1296  39.4627  43.651  < 2e-16 ***
## value        -0.1298     0.0368 338.0147  -3.528 0.000476 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.693
print("\nArousal:")
## [1] "\nArousal:"
arousal_models_hp <- perform_lmm_analysis(arousal_long, "arousal", "HP", TRUE)
print("Kommunikation -> Arousal:")
## [1] "Kommunikation -> Arousal:"
print(summary(arousal_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1400.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.0125 -0.4782 -0.0523  0.4863  3.3675 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 3.439429 1.85457 
##  round            (Intercept) 0.002754 0.05248 
##  Residual                     1.413910 1.18908 
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   3.9444     0.2571  91.3833  15.342   <2e-16 ***
## commJitsi     0.2389     0.3611 117.9994   0.662    0.509    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.702
print("\nArousal -> Flow:")
## [1] "\nArousal -> Flow:"
print(summary(arousal_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 881.4
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.72063 -0.56605  0.02126  0.58875  2.09161 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.725110 0.85153 
##  round            (Intercept) 0.007184 0.08476 
##  Residual                     0.386631 0.62180 
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   5.47828    0.14810  59.66438  36.992   <2e-16 ***
## value        -0.03485    0.02746 336.51216  -1.269    0.205    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.750
print("\nValence:")
## [1] "\nValence:"
valence_models_hp <- perform_lmm_analysis(valence_long, "valence", "HP", TRUE)
print("Kommunikation -> Valence:")
## [1] "Kommunikation -> Valence:"
print(summary(valence_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1357.1
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7002 -0.5197  0.1801  0.6122  1.9798 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.5133   1.2302  
##  round            (Intercept) 0.0447   0.2114  
##  Residual                     1.6053   1.2670  
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   6.4389     0.2215  12.1857  29.076 1.25e-12 ***
## commJitsi     0.3611     0.2613 118.0000   1.382     0.17    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.590
print("\nValence -> Flow:")
## [1] "\nValence -> Flow:"
print(summary(valence_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 866.9
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.57817 -0.52106  0.02237  0.62288  2.02925 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.60933  0.7806  
##  round            (Intercept) 0.01222  0.1105  
##  Residual                     0.38976  0.6243  
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   4.53042    0.21782 107.74296  20.799  < 2e-16 ***
## value         0.12162    0.02902 334.29068   4.191 3.56e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.885
print("\nIndividual Motivation:")
## [1] "\nIndividual Motivation:"
# Fit both mediation paths for individual motivation on the HP task.
# The trailing TRUE presumably selects the round-wise mixed-model variant
# (the lmer output below supports this) — perform_lmm_analysis is defined
# earlier in the file; confirm the flag's meaning there.
ind_motiv_models_hp <- perform_lmm_analysis(ind_motiv_long, "individual_motivation", "HP", TRUE)
print("Kommunikation -> Individual Motivation:")
## [1] "Kommunikation -> Individual Motivation:"
print(summary(ind_motiv_models_hp$comm_to_mediator))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: value ~ comm + (1 | participant.code) + (1 | round)
##    Data: task_data
## 
## REML criterion at convergence: 1005.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.2059 -0.4009  0.1169  0.4749  3.1321 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.85145  0.9227  
##  round            (Intercept) 0.01681  0.1296  
##  Residual                     0.52054  0.7215  
## Number of obs: 360, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##             Estimate Std. Error       df t value Pr(>|t|)    
## (Intercept)   5.2685     0.1506  18.8325  34.980   <2e-16 ***
## commJitsi     0.2870     0.1848 117.9999   1.553    0.123    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##           (Intr)
## commJitsi -0.614
# Path b: individual motivation -> flow
print("\nIndividual Motivation -> Flow:")
## [1] "\nIndividual Motivation -> Flow:"
print(summary(ind_motiv_models_hp$mediator_to_flow))
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: flow_score ~ value + (1 | participant.code) + (1 | round)
##    Data: merged_data
## 
## REML criterion at convergence: 873.8
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -2.48626 -0.56729  0.03419  0.62328  2.01349 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.608371 0.77998 
##  round            (Intercept) 0.008902 0.09435 
##  Residual                     0.403449 0.63518 
## Number of obs: 345, groups:  participant.code, 120; round, 3
## 
## Fixed effects:
##              Estimate Std. Error        df t value Pr(>|t|)    
## (Intercept)   4.54623    0.27278 212.58282  16.667  < 2e-16 ***
## value         0.14642    0.04717 341.29311   3.104  0.00207 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##       (Intr)
## value -0.936
print("\nTeam Motivation:")
## [1] "\nTeam Motivation:"
# Team motivation comes pre-aggregated (team_motiv_aggregated), hence the
# FALSE flag; the summaries below show plain lm fits instead of mixed models.
tm_models_hp <- perform_lmm_analysis(team_motiv_aggregated, "team_motivation", "HP", FALSE)
print("Kommunikation -> Team Motivation:")
## [1] "Kommunikation -> Team Motivation:"
print(summary(tm_models_hp$comm_to_mediator))
## 
## Call:
## lm(formula = value ~ comm, data = task_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.57778 -0.25000  0.08333  0.41667  1.75556 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.583333   0.084128  54.480   <2e-16 ***
## commJitsi   -0.005556   0.118975  -0.047    0.963    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.6517 on 118 degrees of freedom
## Multiple R-squared:  1.848e-05,  Adjusted R-squared:  -0.008456 
## F-statistic: 0.00218 on 1 and 118 DF,  p-value: 0.9628
# Path b: team motivation -> flow (plain lm fit, see output below)
print("\nTeam Motivation -> Flow:")
## [1] "\nTeam Motivation -> Flow:"
print(summary(tm_models_hp$mediator_to_flow))
## 
## Call:
## lm(formula = mean_flow_score ~ value, data = merged_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.15386 -0.66854 -0.05506  0.69804  2.00351 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   4.3577     0.6102   7.141  8.2e-11 ***
## value         0.2129     0.1319   1.614    0.109    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9338 on 118 degrees of freedom
## Multiple R-squared:  0.0216, Adjusted R-squared:  0.01331 
## F-statistic: 2.605 on 1 and 118 DF,  p-value: 0.1092
# ================================================================================
# PART 4: SUMMARY TABLES
# ================================================================================

# Extract the key effect from a fitted model's coefficient table.
#
# Looks for the "commJitsi" coefficient first (communication -> mediator
# models), then for "value" (mediator -> flow models). Both lmerTest and lm
# summaries expose the same four columns used here.
#
# model:  a fitted model whose summary() has a $coefficients matrix
# is_lmm: kept for interface compatibility; the original code had two
#         byte-identical branches, so the flag never changed behavior
# returns: named numeric vector c(estimate, se, t, p); all NA when neither
#          predictor is present
extract_coefficients <- function(model, is_lmm = TRUE) {
  coef_summary <- summary(model)$coefficients
  
  # Same lookup priority as before: commJitsi, then value
  for (predictor in c("commJitsi", "value")) {
    if (predictor %in% rownames(coef_summary)) {
      return(c(
        estimate = coef_summary[predictor, "Estimate"],
        se = coef_summary[predictor, "Std. Error"],
        t = coef_summary[predictor, "t value"],
        p = coef_summary[predictor, "Pr(>|t|)"]
      ))
    }
  }
  
  c(estimate = NA, se = NA, t = NA, p = NA)
}

# Build a per-mediator overview table of both mediation paths.
#
# models_list: named list; each non-NULL element holds $comm_to_mediator and
#              $mediator_to_flow fitted models
# task_name:   task label; currently unused, kept for interface compatibility
# returns: data.frame with one row per mediator (estimate and p-value for
#          each path, rounded to 3 decimals). Row names keep the historical
#          "estimate", "estimate1", ... pattern produced by rbind.
create_summary_table <- function(models_list, task_name) {
  # Build each row independently, then bind once — avoids the O(n^2)
  # grow-by-rbind pattern of the original loop.
  rows <- lapply(names(models_list), function(med_name) {
    models <- models_list[[med_name]]
    if (is.null(models)) {
      return(NULL)
    }
    
    # Kommunikation -> Mediator
    comm_coef <- extract_coefficients(models$comm_to_mediator)
    # Mediator -> Flow
    flow_coef <- extract_coefficients(models$mediator_to_flow)
    
    data.frame(
      Mediator = med_name,
      Comm_to_Med_Est = round(comm_coef["estimate"], 3),
      Comm_to_Med_p = round(comm_coef["p"], 3),
      Med_to_Flow_Est = round(flow_coef["estimate"], 3),
      Med_to_Flow_p = round(flow_coef["p"], 3)
    )
  })
  
  rows <- Filter(Negate(is.null), rows)
  if (length(rows) == 0) {
    return(data.frame())
  }
  do.call(rbind, rows)
}

# Collect all fitted model pairs per mediator; the *_math / *_hp objects are
# created earlier in the script (one perform_lmm_analysis call each)
all_models_math <- list(
  "Team Composition" = tc_models_math,
  "Information Sharing" = is_models_math,
  "Synchronization" = sync_models_math,
  "Stress" = stress_models_math,
  "Arousal" = arousal_models_math,
  "Valence" = valence_models_math,
  "Individual Motivation" = ind_motiv_models_math,
  "Team Motivation" = tm_models_math
)

all_models_hp <- list(
  "Team Composition" = tc_models_hp,
  "Information Sharing" = is_models_hp,
  "Synchronization" = sync_models_hp,
  "Stress" = stress_models_hp,
  "Arousal" = arousal_models_hp,
  "Valence" = valence_models_hp,
  "Individual Motivation" = ind_motiv_models_hp,
  "Team Motivation" = tm_models_hp
)

# Build and print the summary tables (one row per mediator, both paths)
print("\n\n=== ZUSAMMENFASSUNGSTABELLEN ===\n")
## [1] "\n\n=== ZUSAMMENFASSUNGSTABELLEN ===\n"
print("Math Task - Übersicht der Effekte:")
## [1] "Math Task - Übersicht der Effekte:"
summary_math <- create_summary_table(all_models_math, "Math")
print(summary_math)
##                        Mediator Comm_to_Med_Est Comm_to_Med_p Med_to_Flow_Est
## estimate       Team Composition          -0.078         0.418           0.138
## estimate1   Information Sharing           0.812         0.003           0.082
## estimate2       Synchronization           0.388         0.065           0.075
## estimate3                Stress          -0.399         0.080          -0.092
## estimate4               Arousal           0.250         0.463           0.027
## estimate5               Valence           0.475         0.074           0.043
## estimate6 Individual Motivation           0.063         0.760           0.234
## estimate7       Team Motivation           0.067         0.656           0.164
##           Med_to_Flow_p
## estimate          0.322
## estimate1         0.030
## estimate2         0.025
## estimate3         0.006
## estimate4         0.265
## estimate5         0.072
## estimate6         0.000
## estimate7         0.067
# Same overview for the HP task
print("\n\nHP Task - Übersicht der Effekte:")
## [1] "\n\nHP Task - Übersicht der Effekte:"
summary_hp <- create_summary_table(all_models_hp, "HP")
print(summary_hp)
##                        Mediator Comm_to_Med_Est Comm_to_Med_p Med_to_Flow_Est
## estimate       Team Composition          -0.024         0.734           0.116
## estimate1   Information Sharing           0.472         0.005           0.123
## estimate2       Synchronization           0.578         0.003           0.067
## estimate3                Stress          -0.332         0.128          -0.130
## estimate4               Arousal           0.239         0.509          -0.035
## estimate5               Valence           0.361         0.170           0.122
## estimate6 Individual Motivation           0.287         0.123           0.146
## estimate7       Team Motivation          -0.006         0.963           0.213
##           Med_to_Flow_p
## estimate          0.607
## estimate1         0.003
## estimate2         0.030
## estimate3         0.000
## estimate4         0.205
## estimate5         0.000
## estimate6         0.002
## estimate7         0.109
# ================================================================================
# PART 5: VISUALIZATIONS
# ================================================================================

# Bar chart of the mean mediator value (+/- 1 SE) per communication medium
# for one task.
#
# mediator_data: long data frame with columns task, comm, value
# mediator_name: label used in the title and y-axis
# task_filter:   which task to plot ("Math" or "HP")
# returns: a ggplot object (caller prints it)
create_mediator_plot <- function(mediator_data, mediator_name, task_filter) {
  # Aggregate to one mean and standard error per communication medium
  plot_data <- mediator_data %>%
    filter(task == task_filter) %>%
    group_by(comm) %>%
    summarise(
      mean_value = mean(value, na.rm = TRUE),
      # BUG FIX: the SE denominator must count only non-missing values.
      # n() includes NA rows, which understated the SE whenever values
      # were missing while sd(..., na.rm = TRUE) excluded them.
      se_value = sd(value, na.rm = TRUE) / sqrt(sum(!is.na(value))),
      .groups = "drop"
    )
  
  p <- ggplot(plot_data, aes(x = comm, y = mean_value, fill = comm)) +
    geom_bar(stat = "identity", position = "dodge") +
    geom_errorbar(aes(ymin = mean_value - se_value, ymax = mean_value + se_value),
                  width = 0.2, position = position_dodge(0.9)) +
    labs(title = paste(task_filter, "Task -", mediator_name),
         x = "Communication Medium",
         y = paste("Mean", mediator_name),
         fill = "Communication") +
    theme_minimal() +
    scale_fill_brewer(palette = "Set2")
  
  return(p)
}

# Create plots for a selection of mediators
print("\n=== VISUALISIERUNGEN ===")
## [1] "\n=== VISUALISIERUNGEN ==="
print("Erstelle Plots für Mediatoren...")
## [1] "Erstelle Plots für Mediatoren..."
# Math task plots — selection of four mediators; presumably those with the
# clearest effects in the summary table above (TODO confirm the criterion)
p1 <- create_mediator_plot(is_long, "Information Sharing", "Math")
p2 <- create_mediator_plot(sync_long, "Synchronization", "Math")
p3 <- create_mediator_plot(stress_long, "Stress", "Math")
p4 <- create_mediator_plot(valence_long, "Valence", "Math")


# HP task plots — same helper, partially different mediator selection
p5 <- create_mediator_plot(is_long, "Information Sharing", "HP")
p6 <- create_mediator_plot(sync_long, "Synchronization", "HP")
p7 <- create_mediator_plot(stress_long, "Stress", "HP")
p8 <- create_mediator_plot(ind_motiv_long, "Individual Motivation", "HP")

# Render the plots (each print emits one figure in the report)
print(p1)

print(p2)

print(p3)

print(p4)

print(p5)

print(p6)

print(p7)

print(p8)

# ================================================================================
# CLOSING NOTES
# ================================================================================

# Plain-text recap of what this analysis section produced
print("\n\n=== ANALYSEN ABGESCHLOSSEN ===")
## [1] "\n\n=== ANALYSEN ABGESCHLOSSEN ==="
print("1. Repeated Measures Korrelationen für alle rundenweisen Mediatoren berechnet")
## [1] "1. Repeated Measures Korrelationen für alle rundenweisen Mediatoren berechnet"
print("2. Linear Mixed Models für Kommunikation -> Mediator erstellt")
## [1] "2. Linear Mixed Models für Kommunikation -> Mediator erstellt"
print("3. Linear Mixed Models für Mediator -> Flow erstellt")
## [1] "3. Linear Mixed Models für Mediator -> Flow erstellt"
print("4. Getrennte Analysen für Math und HP Tasks durchgeführt")
## [1] "4. Getrennte Analysen für Math und HP Tasks durchgeführt"
print("5. Zusammenfassungstabellen mit allen Effekten erstellt")
## [1] "5. Zusammenfassungstabellen mit allen Effekten erstellt"
print("6. Visualisierungen der Mediator-Unterschiede zwischen Kommunikationsmedien erstellt")
## [1] "6. Visualisierungen der Mediator-Unterschiede zwischen Kommunikationsmedien erstellt"

Mediator differences between the communication treatments across difficulty levels

# Mediator differences between Chat and Jitsi by difficulty level
# Explains the shared-flow paradox: Chat decreases, Jitsi increases with difficulty
# CURRENT DATA ONLY (Chat vs Jitsi)
# ================================================================================

# Packages: lme4/lmerTest for mixed models, emmeans for post-hoc contrasts,
# dplyr/ggplot2 for wrangling and plotting (already attached above; repeated
# here so this section also runs standalone)
library(lme4)
library(lmerTest)
library(emmeans)
library(dplyr)
library(ggplot2)

print("=== MEDIATOR-UNTERSCHIEDE: CHAT vs JITSI nach DIFFICULTY ===")
## [1] "=== MEDIATOR-UNTERSCHIEDE: CHAT vs JITSI nach DIFFICULTY ==="
print("Analyse basiert auf aktuellen Mediatoren aus dem neuen Experiment")
## [1] "Analyse basiert auf aktuellen Mediatoren aus dem neuen Experiment"
# ================================================================================
# PART 1: DATA PREPARATION - DIRECTLY FROM CURRENT DATA
# ================================================================================

# Combine Math and HP observations from flow_clean (current data set):
# keep only the two communication treatments and collapse the two
# "Optimal" difficulty variants into a single level. Unmatched difficulty
# labels become NA, exactly as before.
mediator_analysis_base <- flow_clean %>%
  filter(comm %in% c("Chat", "Jitsi")) %>%
  mutate(
    communication = factor(comm, levels = c("Chat", "Jitsi")),
    difficulty_simple = factor(
      case_match(
        difficulty,
        "Easy" ~ "Easy",
        c("Optimal_Selected", "Optimal_Calibrated") ~ "Optimal",
        "Hard" ~ "Hard"
      ),
      levels = c("Easy", "Optimal", "Hard")
    )
  )

# Sanity checks: observations, participants and teams in the base data
print(paste("Basis-Analysedaten:", nrow(mediator_analysis_base), "Beobachtungen"))
## [1] "Basis-Analysedaten: 805 Beobachtungen"
print(paste("Teilnehmer:", n_distinct(mediator_analysis_base$participant.code)))
## [1] "Teilnehmer: 120"
print(paste("Teams:", n_distinct(mediator_analysis_base$team_id)))
## [1] "Teams: 40"
# Check the data distribution
print("\n--- DATENVERTEILUNG ---")
## [1] "\n--- DATENVERTEILUNG ---"
# Cell counts per communication x difficulty x task. The <NA> difficulty
# rows (see output) are HP observations whose difficulty label is none of
# Easy/Optimal_*/Hard — TODO confirm against the raw difficulty coding.
data_distribution <- mediator_analysis_base %>%
  group_by(communication, difficulty_simple, task) %>%
  summarise(
    n_obs = n(),
    n_participants = n_distinct(participant.code),
    .groups = "drop"
  )
print(data_distribution)
## # A tibble: 12 × 5
##    communication difficulty_simple task  n_obs n_participants
##    <fct>         <fct>             <chr> <int>          <int>
##  1 Chat          Easy              HP       58             58
##  2 Chat          Easy              Math     60             60
##  3 Chat          Optimal           Math    120             60
##  4 Chat          Hard              HP       56             56
##  5 Chat          Hard              Math     53             53
##  6 Chat          <NA>              HP       57             57
##  7 Jitsi         Easy              HP       60             60
##  8 Jitsi         Easy              Math     56             56
##  9 Jitsi         Optimal           Math    118             60
## 10 Jitsi         Hard              HP       56             56
## 11 Jitsi         Hard              Math     53             53
## 12 Jitsi         <NA>              HP       58             58
# ================================================================================
# PART 2: USE ALREADY-EXTRACTED MEDIATORS
# ================================================================================

print("\n=== VERWENDE BEREITS EXTRAHIERTE MEDIATOREN ===")
## [1] "\n=== VERWENDE BEREITS EXTRAHIERTE MEDIATOREN ==="
# Check which mediator data sets are available.
# Lookup table: result key -> name of the candidate data-set object;
# replaces eight copy-pasted exists() blocks with one loop. The insertion
# order is identical to the original, so downstream output is unchanged.
mediator_dataset_names <- c(
  stress                = "stress_long",
  information_sharing   = "is_long",
  synchronization       = "sync_long",
  team_composition      = "tc_aggregated",
  arousal               = "arousal_long",
  valence               = "valence_long",
  individual_motivation = "ind_motiv_long",
  team_motivation       = "team_motiv_aggregated"
)

available_mediator_datasets <- list()
for (med_key in names(mediator_dataset_names)) {
  dataset_name <- mediator_dataset_names[[med_key]]
  if (exists(dataset_name)) {
    available_mediator_datasets[[med_key]] <- get(dataset_name)
  }
}

print("Verfügbare Mediator-Datensätze:")
## [1] "Verfügbare Mediator-Datensätze:"
print(names(available_mediator_datasets))
## [1] "stress"                "information_sharing"   "synchronization"      
## [4] "team_composition"      "arousal"               "valence"              
## [7] "individual_motivation" "team_motivation"
# Aggregate the round-wise mediators for the analysis
# (one value per participant.code x task x comm is needed)

# Build the aggregated list functionally instead of a grow-in-loop pattern;
# element order and names match the source list exactly.
mediator_aggregated_list <- lapply(
  names(available_mediator_datasets),
  function(med_key) {
    med_data <- available_mediator_datasets[[med_key]]
    score_col <- paste0(med_key, "_score")
    
    if ("round" %in% names(med_data)) {
      # Round-wise mediator: average the value over rounds
      med_data %>%
        group_by(participant.code, task, comm) %>%
        dplyr::summarise(!!score_col := mean(value, na.rm = TRUE), .groups = "drop")
    } else {
      # One-off mediator: just rename the value column
      med_data %>%
        dplyr::rename(!!score_col := value)
    }
  }
)
names(mediator_aggregated_list) <- names(available_mediator_datasets)

# Attach every aggregated mediator to the base data via successive left
# joins; Reduce with the base data as the initial value is equivalent to
# the original accumulate-in-a-loop form.
mediator_analysis_data <- Reduce(
  function(joined, med_agg) {
    left_join(joined, med_agg, by = c("participant.code", "task", "comm"))
  },
  mediator_aggregated_list,
  mediator_analysis_base
)

# Identify the mediator score columns that were just joined in; the flow
# outcome carries the same suffix and must be excluded.
score_columns <- grep("_score$", names(mediator_analysis_data), value = TRUE)
mediator_cols <- score_columns[score_columns != "flow_score"]
print("Verfügbare Mediatoren (aus bereits extrahierten Datensätzen):")
## [1] "Verfügbare Mediatoren (aus bereits extrahierten Datensätzen):"
print(mediator_cols)
## [1] "stress_score"                "information_sharing_score"  
## [3] "synchronization_score"       "team_composition_score"     
## [5] "arousal_score"               "valence_score"              
## [7] "individual_motivation_score" "team_motivation_score"
# Check the data distribution after the mediator joins
print("\n--- DATENVERTEILUNG MIT MEDIATOREN ---")
## [1] "\n--- DATENVERTEILUNG MIT MEDIATOREN ---"
# Cross-tab of cell sizes; n_with_stress / n_with_info count non-missing
# mediator values as a spot check of join coverage for two of the mediators
data_distribution_with_mediators <- mediator_analysis_data %>%
  group_by(communication, difficulty_simple, task) %>%
  summarise(
    n_obs = n(),
    n_participants = n_distinct(participant.code),
    n_with_stress = sum(!is.na(stress_score)),
    n_with_info = sum(!is.na(information_sharing_score)),
    .groups = "drop"
  )
print(data_distribution_with_mediators)
## # A tibble: 12 × 7
##    communication difficulty_simple task  n_obs n_participants n_with_stress
##    <fct>         <fct>             <chr> <int>          <int>         <int>
##  1 Chat          Easy              HP       58             58            58
##  2 Chat          Easy              Math     60             60            60
##  3 Chat          Optimal           Math    120             60           120
##  4 Chat          Hard              HP       56             56            56
##  5 Chat          Hard              Math     53             53            53
##  6 Chat          <NA>              HP       57             57            57
##  7 Jitsi         Easy              HP       60             60            60
##  8 Jitsi         Easy              Math     56             56            56
##  9 Jitsi         Optimal           Math    118             60           118
## 10 Jitsi         Hard              HP       56             56            56
## 11 Jitsi         Hard              Math     53             53            53
## 12 Jitsi         <NA>              HP       58             58            58
## # ℹ 1 more variable: n_with_info <int>
# ================================================================================
# PART 3: MIXED-EFFECTS ANALYSES
# ================================================================================

print("\n=== MIXED-EFFECTS ANALYSEN ===")
## [1] "\n=== MIXED-EFFECTS ANALYSEN ==="
# One entry per mediator that could be modelled (model, anova, post-hoc, EMMs)
mediator_results <- list()

# For every mediator column: fit communication x difficulty x task mixed
# model, run the ANOVA and post-hoc contrasts, and store the results.
for(mediator in mediator_cols) {
  cat("\n", rep("=", 60), "\n", sep = "")
  mediator_name <- gsub("_score$", "", mediator)
  cat("MEDIATOR:", toupper(mediator_name), "\n")
  cat(rep("=", 60), "\n", sep = "")
  
  # Keep only rows with a value for the current mediator
  mediator_data <- mediator_analysis_data %>%
    filter(!is.na(!!sym(mediator)))
  
  if(nrow(mediator_data) < 20) {
    cat("Zu wenige Daten für", mediator_name, "- übersprungen\n")
    next
  }
  
  tryCatch({
    # First check whether the factors carry enough variation for the model
    cat("Datencheck für", mediator_name, ":\n")
    cat("- Communication levels:", paste(unique(mediator_data$communication), collapse = ", "), "\n")
    cat("- Difficulty levels:", paste(unique(mediator_data$difficulty_simple), collapse = ", "), "\n")
    cat("- Task levels:", paste(unique(mediator_data$task), collapse = ", "), "\n")
    cat("- Anzahl Teams:", n_distinct(mediator_data$team_id), "\n")
    
    # Every factor needs at least 2 levels, otherwise the model is degenerate
    if(n_distinct(mediator_data$communication) < 2) {
      cat("ÜBERSPRUNGEN: Nur", n_distinct(mediator_data$communication), "Communication level(s)\n")
      next
    }
    if(n_distinct(mediator_data$difficulty_simple) < 2) {
      cat("ÜBERSPRUNGEN: Nur", n_distinct(mediator_data$difficulty_simple), "Difficulty level(s)\n")
      next
    }
    if(n_distinct(mediator_data$task) < 2) {
      cat("ÜBERSPRUNGEN: Nur", n_distinct(mediator_data$task), "Task level(s)\n")
      next
    }
    
    # Drop the team random effect when too few teams exist to estimate it
    if(n_distinct(mediator_data$team_id) < 5) {
      formula_str <- paste(mediator, "~ communication * difficulty_simple * task + (1|participant.code)")
    } else {
      formula_str <- paste(mediator, "~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)")
    }
    
    cat("Verwende Formel:", formula_str, "\n")
    model_full <- lmer(as.formula(formula_str), data = mediator_data)
    
    cat("\n--- MODELL ZUSAMMENFASSUNG ---\n")
    print(summary(model_full))
    
    # ANOVA (Type III with Satterthwaite df via lmerTest, see output)
    cat("\n--- ANOVA ERGEBNISSE ---\n")
    anova_result <- anova(model_full)
    print(anova_result)
    
    # Post-hoc: Chat vs Jitsi within each difficulty level
    cat("\n--- POST-HOC: Communication × Difficulty ---\n")
    emmeans_comm_diff <- emmeans(model_full, pairwise ~ communication | difficulty_simple)
    posthoc_comm_diff <- as.data.frame(emmeans_comm_diff$contrasts)
    print(posthoc_comm_diff)
    
    # Estimated marginal means for all factor combinations
    cat("\n--- MARGINAL MEANS ---\n")
    marginal_means <- as.data.frame(emmeans(model_full, ~ communication * difficulty_simple * task))
    print(marginal_means)
    
    # tryCatch evaluates this expression in the loop's frame, so a plain
    # `<-` correctly updates the outer mediator_results here.
    mediator_results[[mediator]] <- list(
      model = model_full,
      anova = anova_result,
      posthoc_comm_diff = posthoc_comm_diff,
      marginal_means = marginal_means
    )
    
  }, error = function(e) {
    cat("Fehler bei", mediator_name, ":", e$message, "\n")
    
    # Even simpler fallback model without interactions
    cat("Versuche einfacheres Modell...\n")
    tryCatch({
      simple_formula <- paste(mediator, "~ communication + difficulty_simple + task + (1|participant.code)")
      simple_model <- lmer(as.formula(simple_formula), data = mediator_data)
      
      cat("Einfaches Modell erfolgreich:\n")
      print(summary(simple_model))
      
      # BUG FIX: `<<-` is required here. This handler is a function, so the
      # previous plain `<-` only created a local `mediator_results` inside
      # the handler and the fallback model was silently discarded.
      mediator_results[[mediator]] <<- list(
        model = simple_model,
        note = "Vereinfachtes Modell ohne Interaktionen"
      )
      
    }, error = function(e2) {
      cat("Auch einfaches Modell fehlgeschlagen:", e2$message, "\n")
    })
  })
}
## 
## ============================================================
## MEDIATOR: STRESS 
## ============================================================
## Datencheck für stress :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: stress_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1586.7
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.6181 -0.3968 -0.0271  0.4453  4.0015 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 1.1401   1.068   
##  team_id          (Intercept) 0.0237   0.154   
##  Residual                     0.3353   0.579   
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        2.581e+00  1.613e-01
## communicationJitsi                                -2.941e-01  2.276e-01
## difficulty_simpleOptimal                           1.817e-14  9.155e-02
## difficulty_simpleHard                             -9.288e-03  1.089e-01
## taskMath                                           2.742e-01  1.068e-01
## communicationJitsi:difficulty_simpleOptimal       -1.595e-02  1.315e-01
## communicationJitsi:difficulty_simpleHard           3.221e-03  1.533e-01
## communicationJitsi:taskMath                       -8.896e-02  1.519e-01
## difficulty_simpleHard:taskMath                     1.702e-02  1.548e-01
## communicationJitsi:difficulty_simpleHard:taskMath -2.070e-02  2.191e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        5.710e+01  16.001   <2e-16
## communicationJitsi                                 5.659e+01  -1.292   0.2015
## difficulty_simpleOptimal                           5.615e+02   0.000   1.0000
## difficulty_simpleHard                              5.620e+02  -0.085   0.9320
## taskMath                                           5.618e+02   2.567   0.0105
## communicationJitsi:difficulty_simpleOptimal        5.618e+02  -0.121   0.9035
## communicationJitsi:difficulty_simpleHard           5.620e+02   0.021   0.9832
## communicationJitsi:taskMath                        5.620e+02  -0.586   0.5584
## difficulty_simpleHard:taskMath                     5.624e+02   0.110   0.9125
## communicationJitsi:difficulty_simpleHard:taskMath  5.621e+02  -0.094   0.9248
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                          *  
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.331  0.234  0.000                                          
## taskMath    -0.338  0.240 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.235 -0.327  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.238 -0.330  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.233 -0.165  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.165  0.229 -0.279  0.498  0.487  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                      Sum Sq Mean Sq NumDF  DenDF F value
## communication                        0.9570  0.9570     1  38.21  2.8542
## difficulty_simple                    0.0087  0.0043     2 562.20  0.0129
## task                                 6.0424  6.0424     1 562.57 18.0218
## communication:difficulty_simple      0.0030  0.0015     2 562.20  0.0044
## communication:task                   0.2742  0.2742     1 562.57  0.8179
## difficulty_simple:task               0.0012  0.0012     1 562.14  0.0037
## communication:difficulty_simple:task 0.0030  0.0030     1 562.14  0.0089
##                                         Pr(>F)    
## communication                          0.09928 .  
## difficulty_simple                      0.98718    
## task                                 2.555e-05 ***
## communication:difficulty_simple        0.99557    
## communication:task                     0.36617    
## difficulty_simple:task                 0.95152    
## communication:difficulty_simple:task   0.92475    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast      estimate        SE    df t.ratio p.value
##  Chat - Jitsi 0.3385708 0.2148144 45.10   1.576  0.1220
## 
## difficulty_simple = Optimal:
##  contrast      estimate        SE    df t.ratio p.value
##  Chat - Jitsi    nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast      estimate        SE    df t.ratio p.value
##  Chat - Jitsi 0.3457014 0.2160175 46.11   1.600  0.1164
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   2.580755 0.1612864 57.16 2.257804
##  Jitsi         Easy              HP   2.286667 0.1605479 56.14 1.965068
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   2.571467 0.1620338 58.20 2.247145
##  Jitsi         Hard              HP   2.280600 0.1620476 58.21 1.956252
##  Chat          Easy              Math 2.855000 0.1605479 56.14 2.533402
##  Jitsi         Easy              Math 2.471947 0.1620685 58.24 2.147560
##  Chat          Optimal           Math 2.855000 0.1515968 44.75 2.549621
##  Jitsi         Optimal           Math 2.455997 0.1517906 44.98 2.150270
##  Chat          Hard              Math 2.862728 0.1632742 59.95 2.536126
##  Jitsi         Hard              Math 2.462193 0.1633334 60.03 2.135480
##  upper.CL
##  2.903707
##  2.608265
##        NA
##        NA
##  2.895789
##  2.604948
##  3.176598
##  2.796334
##  3.160379
##  2.761723
##  3.189331
##  2.788905
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: INFORMATION_SHARING 
## ============================================================
## Datencheck für information_sharing :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: information_sharing_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1864.9
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.7060 -0.4456  0.0639  0.4787  4.1893 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.7192   0.8481  
##  team_id          (Intercept) 0.3669   0.6058  
##  Residual                     0.5648   0.7515  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        5.607e+00  2.004e-01
## communicationJitsi                                 4.732e-01  2.826e-01
## difficulty_simpleOptimal                           1.754e-14  1.188e-01
## difficulty_simpleHard                             -2.439e-03  1.413e-01
## taskMath                                          -8.532e-01  1.386e-01
## communicationJitsi:difficulty_simpleOptimal        1.268e-02  1.706e-01
## communicationJitsi:difficulty_simpleHard           8.340e-02  1.990e-01
## communicationJitsi:taskMath                        3.586e-01  1.971e-01
## difficulty_simpleHard:taskMath                     6.938e-03  2.009e-01
## communicationJitsi:difficulty_simpleHard:taskMath -1.100e-01  2.843e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        5.962e+01  27.987  < 2e-16
## communicationJitsi                                 5.905e+01   1.674   0.0994
## difficulty_simpleOptimal                           5.606e+02   0.000   1.0000
## difficulty_simpleHard                              5.617e+02  -0.017   0.9862
## taskMath                                           5.612e+02  -6.154 1.44e-09
## communicationJitsi:difficulty_simpleOptimal        5.612e+02   0.074   0.9408
## communicationJitsi:difficulty_simpleHard           5.617e+02   0.419   0.6752
## communicationJitsi:taskMath                        5.615e+02   1.819   0.0695
## difficulty_simpleHard:taskMath                     5.624e+02   0.035   0.9725
## communicationJitsi:difficulty_simpleHard:taskMath  5.619e+02  -0.387   0.6989
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                .  
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                          ***
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                       .  
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.345  0.245  0.000                                          
## taskMath    -0.353  0.250 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.245 -0.341  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.248 -0.345  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.244 -0.173  0.394 -0.705 -0.690 -0.275  0.500  0.485       
## cmmncJ:_H:M -0.172  0.239 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                      Sum Sq Mean Sq NumDF  DenDF F value
## communication                         4.143   4.143     1  38.32  7.3352
## difficulty_simple                     0.040   0.020     2 561.97  0.0354
## task                                 54.214  54.214     1 562.53 95.9836
## communication:difficulty_simple       0.043   0.021     2 561.97  0.0379
## communication:task                    2.564   2.564     1 562.53  4.5397
## difficulty_simple:task                0.065   0.065     1 561.92  0.1143
## communication:difficulty_simple:task  0.085   0.085     1 561.92  0.1497
##                                       Pr(>F)    
## communication                        0.01005 *  
## difficulty_simple                    0.96518    
## task                                 < 2e-16 ***
## communication:difficulty_simple      0.96278    
## communication:task                   0.03355 *  
## difficulty_simple:task               0.73538    
## communication:difficulty_simple:task 0.69894    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.6524979 0.2653038 45.95  -2.459  0.0177
## 
## difficulty_simple = Optimal:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi     nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.6808938 0.2669261 47.07  -2.551  0.0141
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   5.607345 0.2003581 59.56 5.206508
##  Jitsi         Easy              HP   6.080556 0.1993695 58.43 5.681536
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   5.604905 0.2013634 60.74 5.202219
##  Jitsi         Hard              HP   6.161519 0.2013841 60.75 5.758793
##  Chat          Easy              Math 4.754167 0.1993695 58.43 4.355147
##  Jitsi         Easy              Math 5.585952 0.2014116 60.78 5.183175
##  Chat          Optimal           Math 4.754167 0.1871934 45.56 4.377268
##  Jitsi         Optimal           Math 5.598631 0.1874539 45.81 5.221264
##  Chat          Hard              Math 4.758666 0.2030322 62.71 4.352901
##  Jitsi         Hard              Math 5.563840 0.2031040 62.79 5.157942
##  upper.CL
##  6.008182
##  6.479575
##        NA
##        NA
##  6.007592
##  6.564244
##  5.153186
##  5.988729
##  5.131065
##  5.975998
##  5.164430
##  5.969738
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: SYNCHRONIZATION 
## ============================================================
## Datencheck für synchronization :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: synchronization_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1637.3
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -2.8029 -0.4371  0.0746  0.4504  4.5223 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.6638   0.8147  
##  team_id          (Intercept) 0.1727   0.4156  
##  Residual                     0.3939   0.6276  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        5.157e+00  1.629e-01
## communicationJitsi                                 5.546e-01  2.298e-01
## difficulty_simpleOptimal                          -5.976e-15  9.923e-02
## difficulty_simpleHard                              5.335e-02  1.180e-01
## taskMath                                           6.846e-02  1.158e-01
## communicationJitsi:difficulty_simpleOptimal        1.583e-02  1.425e-01
## communicationJitsi:difficulty_simpleHard          -2.624e-02  1.662e-01
## communicationJitsi:taskMath                       -1.766e-01  1.646e-01
## difficulty_simpleHard:taskMath                    -8.899e-02  1.678e-01
## communicationJitsi:difficulty_simpleHard:taskMath  5.595e-02  2.374e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        6.113e+01  31.652   <2e-16
## communicationJitsi                                 6.051e+01   2.413   0.0189
## difficulty_simpleOptimal                           5.612e+02   0.000   1.0000
## difficulty_simpleHard                              5.621e+02   0.452   0.6513
## taskMath                                           5.617e+02   0.591   0.5546
## communicationJitsi:difficulty_simpleOptimal        5.617e+02   0.111   0.9116
## communicationJitsi:difficulty_simpleHard           5.621e+02  -0.158   0.8746
## communicationJitsi:taskMath                        5.620e+02  -1.073   0.2839
## difficulty_simpleHard:taskMath                     5.627e+02  -0.530   0.5961
## communicationJitsi:difficulty_simpleHard:taskMath  5.623e+02   0.236   0.8138
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                *  
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                             
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.355  0.251  0.000                                          
## taskMath    -0.363  0.257 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.252 -0.350  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.255 -0.354  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.250 -0.177  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.177  0.246 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                       Sum Sq Mean Sq NumDF  DenDF F value
## communication                        1.99893 1.99893     1  38.24  5.0748
## difficulty_simple                    0.04978 0.02489     2 562.36  0.0632
## task                                 0.28203 0.28203     1 562.88  0.7160
## communication:difficulty_simple      0.00011 0.00006     2 562.36  0.0001
## communication:task                   0.61452 0.61452     1 562.88  1.5601
## difficulty_simple:task               0.10402 0.10402     1 562.30  0.2641
## communication:difficulty_simple:task 0.02187 0.02187     1 562.30  0.0555
##                                       Pr(>F)  
## communication                        0.03009 *
## difficulty_simple                    0.93878  
## task                                 0.39782  
## communication:difficulty_simple      0.99986  
## communication:task                   0.21217  
## difficulty_simple:task               0.60754  
## communication:difficulty_simple:task 0.81380  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.4662716 0.2148868 46.53  -2.170  0.0352
## 
## difficulty_simple = Optimal:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi     nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.4680119 0.2162885 47.74  -2.164  0.0355
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   5.156537 0.1629150 61.23 4.830793
##  Jitsi         Easy              HP   5.711111 0.1620632 59.99 5.386936
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   5.209886 0.1637795 62.50 4.882547
##  Jitsi         Hard              HP   5.738225 0.1637962 62.52 5.410854
##  Chat          Easy              Math 5.225000 0.1620632 59.99 4.900824
##  Jitsi         Easy              Math 5.602969 0.1638197 62.54 5.275555
##  Chat          Optimal           Math 5.225000 0.1515982 46.11 4.919868
##  Jitsi         Optimal           Math 5.618799 0.1518237 46.38 5.313261
##  Chat          Hard              Math 5.189363 0.1652132 64.64 4.859375
##  Jitsi         Hard              Math 5.597048 0.1652769 64.73 5.266941
##  upper.CL
##  5.482281
##  6.035287
##        NA
##        NA
##  5.537224
##  6.065595
##  5.549176
##  5.930383
##  5.530132
##  5.924336
##  5.519351
##  5.927155
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: TEAM_COMPOSITION 
## ============================================================
## Datencheck für team_composition :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: team_composition_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 396.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.1077 -0.5666  0.0087  0.4558  2.9526 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.16030  0.4004  
##  team_id          (Intercept) 0.01091  0.1044  
##  Residual                     0.06028  0.2455  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        3.720e+00  6.530e-02
## communicationJitsi                                -2.935e-02  9.212e-02
## difficulty_simpleOptimal                           7.133e-15  3.882e-02
## difficulty_simpleHard                             -2.301e-02  4.616e-02
## taskMath                                          -7.380e-02  4.530e-02
## communicationJitsi:difficulty_simpleOptimal        1.127e-02  5.574e-02
## communicationJitsi:difficulty_simpleHard           2.005e-02  6.501e-02
## communicationJitsi:taskMath                       -5.624e-02  6.441e-02
## difficulty_simpleHard:taskMath                     2.289e-02  6.564e-02
## communicationJitsi:difficulty_simpleHard:taskMath -2.498e-02  9.290e-02
##                                                           df t value Pr(>|t|)
## (Intercept)                                        5.961e+01  56.968   <2e-16
## communicationJitsi                                 5.903e+01  -0.319    0.751
## difficulty_simpleOptimal                           5.608e+02   0.000    1.000
## difficulty_simpleHard                              5.615e+02  -0.498    0.618
## taskMath                                           5.612e+02  -1.629    0.104
## communicationJitsi:difficulty_simpleOptimal        5.612e+02   0.202    0.840
## communicationJitsi:difficulty_simpleHard           5.615e+02   0.308    0.758
## communicationJitsi:taskMath                        5.614e+02  -0.873    0.383
## difficulty_simpleHard:taskMath                     5.619e+02   0.349    0.727
## communicationJitsi:difficulty_simpleHard:taskMath  5.616e+02  -0.269    0.788
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                             
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.346  0.245  0.000                                          
## taskMath    -0.354  0.251 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.246 -0.342  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.249 -0.346  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.244 -0.173  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.173  0.240 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                       Sum Sq Mean Sq NumDF  DenDF F value
## communication                        0.02495 0.02495     1  38.22  0.4139
## difficulty_simple                    0.01190 0.00595     2 561.68  0.0987
## task                                 1.04045 1.04045     1 562.10 17.2617
## communication:difficulty_simple      0.00712 0.00356     2 561.68  0.0591
## communication:task                   0.13136 0.13136     1 562.10  2.1794
## difficulty_simple:task               0.00302 0.00302     1 561.62  0.0501
## communication:difficulty_simple:task 0.00436 0.00436     1 561.62  0.0723
##                                         Pr(>F)    
## communication                           0.5238    
## difficulty_simple                       0.9061    
## task                                 3.764e-05 ***
## communication:difficulty_simple         0.9426    
## communication:task                      0.1404    
## difficulty_simple:task                  0.8229    
## communication:difficulty_simple:task    0.7881    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate         SE    df t.ratio p.value
##  Chat - Jitsi 0.05747183 0.08643776 46.00   0.665  0.5094
## 
## difficulty_simple = Optimal:
##  contrast       estimate         SE    df t.ratio p.value
##  Chat - Jitsi     nonEst         NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate         SE    df t.ratio p.value
##  Chat - Jitsi 0.04991659 0.08697385 47.14   0.574  0.5687
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean         SE    df lower.CL
##  Chat          Easy              HP   3.720093 0.06530181 59.71 3.589457
##  Jitsi         Easy              HP   3.690741 0.06497471 58.55 3.560706
##  Chat          Optimal           HP     nonEst         NA    NA       NA
##  Jitsi         Optimal           HP     nonEst         NA    NA       NA
##  Chat          Hard              HP   3.697085 0.06563307 60.90 3.565839
##  Jitsi         Hard              HP   3.687779 0.06563920 60.91 3.556522
##  Chat          Easy              Math 3.646296 0.06497471 58.55 3.516262
##  Jitsi         Easy              Math 3.560705 0.06564828 60.94 3.429430
##  Chat          Optimal           Math 3.646296 0.06098704 45.60 3.523507
##  Jitsi         Optimal           Math 3.571977 0.06107340 45.86 3.449032
##  Chat          Hard              Math 3.646181 0.06618248 62.90 3.513922
##  Jitsi         Hard              Math 3.555654 0.06620807 62.99 3.423347
##  upper.CL
##  3.850729
##  3.820776
##        NA
##        NA
##  3.828331
##  3.819037
##  3.776331
##  3.691979
##  3.769086
##  3.694921
##  3.778440
##  3.687960
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: AROUSAL 
## ============================================================
## Datencheck für arousal :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: arousal_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1908.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7020 -0.4783 -0.0189  0.5926  3.7020 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 2.9172   1.7080  
##  team_id          (Intercept) 0.1546   0.3932  
##  Residual                     0.4876   0.6983  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        3.924e+00  2.546e-01
## communicationJitsi                                 2.589e-01  3.596e-01
## difficulty_simpleOptimal                          -4.993e-15  1.104e-01
## difficulty_simpleHard                             -3.025e-02  1.313e-01
## taskMath                                           4.464e-01  1.288e-01
## communicationJitsi:difficulty_simpleOptimal       -3.808e-02  1.586e-01
## communicationJitsi:difficulty_simpleHard           2.174e-02  1.849e-01
## communicationJitsi:taskMath                        5.301e-02  1.832e-01
## difficulty_simpleHard:taskMath                     5.217e-02  1.868e-01
## communicationJitsi:difficulty_simpleHard:taskMath -4.114e-02  2.643e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        4.775e+01  15.413  < 2e-16
## communicationJitsi                                 4.750e+01   0.720 0.475096
## difficulty_simpleOptimal                           5.619e+02   0.000 1.000000
## difficulty_simpleHard                              5.621e+02  -0.230 0.817885
## taskMath                                           5.620e+02   3.464 0.000572
## communicationJitsi:difficulty_simpleOptimal        5.620e+02  -0.240 0.810310
## communicationJitsi:difficulty_simpleHard           5.622e+02   0.118 0.906453
## communicationJitsi:taskMath                        5.621e+02   0.289 0.772460
## difficulty_simpleHard:taskMath                     5.624e+02   0.279 0.780095
## communicationJitsi:difficulty_simpleHard:taskMath  5.622e+02  -0.156 0.876343
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                          ***
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.708                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.253  0.179  0.000                                          
## taskMath    -0.258  0.183 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.179 -0.249  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.182 -0.252  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.178 -0.126  0.394 -0.705 -0.690 -0.274  0.501  0.485       
## cmmncJ:_H:M -0.126  0.175 -0.279  0.498  0.487  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                       Sum Sq Mean Sq NumDF  DenDF F value
## communication                         0.3303  0.3303     1  38.12  0.6774
## difficulty_simple                     0.0739  0.0369     2 562.25  0.0758
## task                                 26.5473 26.5473     1 562.46 54.4407
## communication:difficulty_simple       0.0229  0.0114     2 562.25  0.0235
## communication:task                    0.0292  0.0292     1 562.46  0.0600
## difficulty_simple:task                0.0279  0.0279     1 562.22  0.0572
## communication:difficulty_simple:task  0.0118  0.0118     1 562.22  0.0242
##                                         Pr(>F)    
## communication                           0.4156    
## difficulty_simple                       0.9270    
## task                                 5.784e-13 ***
## communication:difficulty_simple         0.9768    
## communication:task                      0.8066    
## difficulty_simple:task                  0.8111    
## communication:difficulty_simple:task    0.8763    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.2853851 0.3479915 41.71  -0.820  0.4168
## 
## difficulty_simple = Optimal:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi     nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.2865557 0.3490781 42.23  -0.821  0.4163
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   3.924452 0.2546118 47.78 3.412459
##  Jitsi         Easy              HP   4.183333 0.2539283 47.27 3.672573
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   3.894204 0.2553029 48.29 3.380963
##  Jitsi         Hard              HP   4.174827 0.2553163 48.30 3.661562
##  Chat          Easy              Math 4.370833 0.2539283 47.27 3.860073
##  Jitsi         Easy              Math 4.682723 0.2553369 48.31 4.169420
##  Chat          Optimal           Math 4.370833 0.2457965 41.53 3.874630
##  Jitsi         Optimal           Math 4.644644 0.2459716 41.65 4.148130
##  Chat          Hard              Math 4.392752 0.2564540 49.16 3.877430
##  Jitsi         Hard              Math 4.685240 0.2565119 49.20 4.169813
##  upper.CL
##  4.436445
##  4.694094
##        NA
##        NA
##  4.407445
##  4.688093
##  4.881594
##  5.196025
##  4.867036
##  5.141159
##  4.908074
##  5.200667
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: VALENCE 
## ============================================================
## Datencheck für valence :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: valence_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## boundary (singular) fit: see help('isSingular')
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1838.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -4.4389 -0.4633  0.0130  0.4573  4.1114 
## 
## Random effects:
##  Groups           Name        Variance  Std.Dev. 
##  participant.code (Intercept) 1.477e+00 1.215e+00
##  team_id          (Intercept) 2.964e-19 5.444e-10
##  Residual                     4.985e-01 7.061e-01
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        6.475e+00  1.824e-01
## communicationJitsi                                 3.255e-01  2.573e-01
## difficulty_simpleOptimal                          -7.632e-15  1.116e-01
## difficulty_simpleHard                              4.656e-02  1.327e-01
## taskMath                                          -8.829e-01  1.303e-01
## communicationJitsi:difficulty_simpleOptimal       -1.863e-02  1.603e-01
## communicationJitsi:difficulty_simpleHard          -6.814e-02  1.870e-01
## communicationJitsi:taskMath                        1.907e-01  1.852e-01
## difficulty_simpleHard:taskMath                    -6.506e-02  1.888e-01
## communicationJitsi:difficulty_simpleHard:taskMath  1.534e-01  2.672e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        1.872e+02  35.491  < 2e-16
## communicationJitsi                                 1.854e+02   1.265    0.207
## difficulty_simpleOptimal                           5.607e+02   0.000    1.000
## difficulty_simpleHard                              5.613e+02   0.351    0.726
## taskMath                                           5.611e+02  -6.777 3.11e-11
## communicationJitsi:difficulty_simpleOptimal        5.611e+02  -0.116    0.908
## communicationJitsi:difficulty_simpleHard           5.613e+02  -0.364    0.716
## communicationJitsi:taskMath                        5.613e+02   1.029    0.304
## difficulty_simpleHard:taskMath                     5.617e+02  -0.345    0.731
## communicationJitsi:difficulty_simpleHard:taskMath  5.615e+02   0.574    0.566
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                          ***
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.356  0.253  0.000                                          
## taskMath    -0.364  0.258 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.253 -0.352  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.256 -0.356  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.251 -0.178  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.178  0.247 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
## 
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                      Sum Sq Mean Sq NumDF  DenDF  F value
## communication                         1.685   1.685     1 117.69   3.3807
## difficulty_simple                     0.087   0.043     2 561.54   0.0872
## task                                 67.965  67.965     1 561.97 136.3313
## communication:difficulty_simple       0.108   0.054     2 561.54   0.1082
## communication:task                    1.988   1.988     1 561.97   3.9879
## difficulty_simple:task                0.004   0.004     1 561.47   0.0076
## communication:difficulty_simple:task  0.164   0.164     1 561.47   0.3295
##                                       Pr(>F)    
## communication                        0.06848 .  
## difficulty_simple                    0.91650    
## task                                 < 2e-16 ***
## communication:difficulty_simple      0.89745    
## communication:task                   0.04631 *  
## difficulty_simple:task               0.93071    
## communication:difficulty_simple:task 0.56617    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.4208334 0.2404496 46.64  -1.750  0.0867
## 
## difficulty_simple = Optimal:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi     nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.4293767 0.2420442 47.87  -1.774  0.0824
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   6.474517 0.1824283 61.54 6.109794
##  Jitsi         Easy              HP   6.800000 0.1814589 60.28 6.437063
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   6.521076 0.1834094 62.84 6.154543
##  Jitsi         Hard              HP   6.778419 0.1834270 62.85 6.411852
##  Chat          Easy              Math 5.591667 0.1814589 60.28 5.228730
##  Jitsi         Easy              Math 6.107851 0.1834536 62.88 5.741234
##  Chat          Optimal           Math 5.591667 0.1696259 46.21 5.250269
##  Jitsi         Optimal           Math 6.089219 0.1698829 46.49 5.747359
##  Chat          Hard              Math 5.573164 0.1850355 65.02 5.203624
##  Jitsi         Hard              Math 6.174574 0.1851114 65.11 5.804893
##  upper.CL
##  6.839240
##  7.162937
##        NA
##        NA
##  6.887609
##  7.144985
##  5.954604
##  6.474467
##  5.933064
##  6.431079
##  5.942703
##  6.544255
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: INDIVIDUAL_MOTIVATION 
## ============================================================
## Datencheck für individual_motivation :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: individual_motivation_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## boundary (singular) fit: see help('isSingular')
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1441.8
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7167 -0.4394  0.0393  0.4132  3.1309 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.8451   0.9193  
##  team_id          (Intercept) 0.0000   0.0000  
##  Residual                     0.2766   0.5260  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        5.246e+00  1.374e-01
## communicationJitsi                                 3.092e-01  1.939e-01
## difficulty_simpleOptimal                          -1.031e-13  8.316e-02
## difficulty_simpleHard                              5.077e-02  9.888e-02
## taskMath                                          -1.130e-01  9.704e-02
## communicationJitsi:difficulty_simpleOptimal        1.026e-03  1.194e-01
## communicationJitsi:difficulty_simpleHard          -4.412e-02  1.393e-01
## communicationJitsi:taskMath                       -2.258e-01  1.380e-01
## difficulty_simpleHard:taskMath                    -3.416e-02  1.406e-01
## communicationJitsi:difficulty_simpleHard:taskMath  3.728e-02  1.990e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        1.859e+02  38.170   <2e-16
## communicationJitsi                                 1.842e+02   1.595    0.112
## difficulty_simpleOptimal                           5.613e+02   0.000    1.000
## difficulty_simpleHard                              5.619e+02   0.513    0.608
## taskMath                                           5.617e+02  -1.165    0.245
## communicationJitsi:difficulty_simpleOptimal        5.617e+02   0.009    0.993
## communicationJitsi:difficulty_simpleHard           5.619e+02  -0.317    0.752
## communicationJitsi:taskMath                        5.619e+02  -1.637    0.102
## difficulty_simpleHard:taskMath                     5.623e+02  -0.243    0.808
## communicationJitsi:difficulty_simpleHard:taskMath  5.621e+02   0.187    0.851
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                             
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.352  0.250  0.000                                          
## taskMath    -0.360  0.255 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.250 -0.348  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.253 -0.352  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.249 -0.176  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.176  0.244 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
## 
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                      Sum Sq Mean Sq NumDF  DenDF F value
## communication                        0.2858  0.2858     1 118.27  1.0331
## difficulty_simple                    0.0525  0.0262     2 562.12  0.0948
## task                                 6.0749  6.0749     1 562.54 21.9593
## communication:difficulty_simple      0.0185  0.0093     2 562.12  0.0335
## communication:task                   1.1938  1.1938     1 562.54  4.3154
## difficulty_simple:task               0.0067  0.0067     1 562.05  0.0243
## communication:difficulty_simple:task 0.0097  0.0097     1 562.05  0.0351
##                                         Pr(>F)    
## communication                          0.31152    
## difficulty_simple                      0.90956    
## task                                 3.497e-06 ***
## communication:difficulty_simple        0.96708    
## communication:task                     0.03822 *  
## difficulty_simple:task                 0.87615    
## communication:difficulty_simple:task   0.85149    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.1962762 0.1814717 46.38  -1.082  0.2850
## 
## difficulty_simple = Optimal:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi     nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast       estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.1707918 0.1826447 47.57  -0.935  0.3545
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   5.246358 0.1374488 60.80 4.971494
##  Jitsi         Easy              HP   5.555556 0.1367346 59.58 5.282006
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   5.297130 0.1381716 62.06 5.020934
##  Jitsi         Hard              HP   5.562204 0.1381846 62.07 5.285983
##  Chat          Easy              Math 5.133333 0.1367346 59.58 4.859784
##  Jitsi         Easy              Math 5.216688 0.1382043 62.10 4.940430
##  Chat          Optimal           Math 5.133333 0.1280274 45.97 4.875622
##  Jitsi         Optimal           Math 5.217714 0.1282164 46.23 4.959663
##  Chat          Hard              Math 5.149950 0.1393698 64.17 4.871541
##  Jitsi         Hard              Math 5.226460 0.1394260 64.26 4.947946
##  upper.CL
##  5.521222
##  5.829105
##        NA
##        NA
##  5.573326
##  5.838425
##  5.406883
##  5.492946
##  5.391044
##  5.475765
##  5.428360
##  5.504974
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## ============================================================
## MEDIATOR: TEAM_MOTIVATION 
## ============================================================
## Datencheck für team_motivation :
## - Communication levels: Jitsi, Chat 
## - Difficulty levels: Optimal, Hard, Easy, NA 
## - Task levels: Math, HP 
## - Anzahl Teams: 40 
## Verwende Formel: team_motivation_score ~ communication * difficulty_simple * task + (1|participant.code) + (1|team_id)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- MODELL ZUSAMMENFASSUNG ---
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: as.formula(formula_str)
##    Data: mediator_data
## 
## REML criterion at convergence: 1060.4
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -3.7258 -0.3680  0.0024  0.3678  4.3190 
## 
## Random effects:
##  Groups           Name        Variance Std.Dev.
##  participant.code (Intercept) 0.35782  0.5982  
##  team_id          (Intercept) 0.06871  0.2621  
##  Residual                     0.16261  0.4032  
## Number of obs: 690, groups:  participant.code, 120; team_id, 40
## 
## Fixed effects:
##                                                     Estimate Std. Error
## (Intercept)                                        4.558e+00  1.106e-01
## communicationJitsi                                 1.980e-02  1.560e-01
## difficulty_simpleOptimal                           9.976e-15  6.376e-02
## difficulty_simpleHard                              1.170e-02  7.581e-02
## taskMath                                           1.031e-01  7.440e-02
## communicationJitsi:difficulty_simpleOptimal        2.686e-02  9.156e-02
## communicationJitsi:difficulty_simpleHard           1.515e-02  1.068e-01
## communicationJitsi:taskMath                        1.722e-02  1.058e-01
## difficulty_simpleHard:taskMath                    -1.096e-02  1.078e-01
## communicationJitsi:difficulty_simpleHard:taskMath -2.781e-02  1.526e-01
##                                                           df t value Pr(>|t|)
## (Intercept)                                        5.780e+01  41.226   <2e-16
## communicationJitsi                                 5.727e+01   0.127    0.899
## difficulty_simpleOptimal                           5.608e+02   0.000    1.000
## difficulty_simpleHard                              5.615e+02   0.154    0.877
## taskMath                                           5.612e+02   1.386    0.166
## communicationJitsi:difficulty_simpleOptimal        5.612e+02   0.293    0.769
## communicationJitsi:difficulty_simpleHard           5.615e+02   0.142    0.887
## communicationJitsi:taskMath                        5.614e+02   0.163    0.871
## difficulty_simpleHard:taskMath                     5.620e+02  -0.102    0.919
## communicationJitsi:difficulty_simpleHard:taskMath  5.617e+02  -0.182    0.855
##                                                      
## (Intercept)                                       ***
## communicationJitsi                                   
## difficulty_simpleOptimal                             
## difficulty_simpleHard                                
## taskMath                                             
## communicationJitsi:difficulty_simpleOptimal          
## communicationJitsi:difficulty_simpleHard             
## communicationJitsi:taskMath                          
## difficulty_simpleHard:taskMath                       
## communicationJitsi:difficulty_simpleHard:taskMath    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) cmmncJ dffc_O dffc_H tskMth cmJ:_O cmJ:_H cmmJ:M df_H:M
## commnctnJts -0.709                                                        
## dffclty_smO  0.000  0.000                                                 
## dffclty_smH -0.336  0.238  0.000                                          
## taskMath    -0.343  0.243 -0.571  0.499                                   
## cmmnctnJ:_O  0.000  0.000 -0.696  0.000  0.398                            
## cmmnctnJ:_H  0.238 -0.332  0.000 -0.710 -0.354  0.000                     
## cmmnctnJt:M  0.242 -0.335  0.402 -0.351 -0.703 -0.583  0.489              
## dffclty_H:M  0.237 -0.168  0.394 -0.705 -0.690 -0.275  0.501  0.485       
## cmmncJ:_H:M -0.167  0.233 -0.279  0.498  0.488  0.403 -0.699 -0.692 -0.707
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
## 
## --- ANOVA ERGEBNISSE ---
## Missing cells for: difficulty_simpleOptimal:taskHP, communicationChat:difficulty_simpleOptimal:taskHP, communicationJitsi:difficulty_simpleOptimal:taskHP.  
## Interpret type III hypotheses with care.
## Type III Analysis of Variance Table with Satterthwaite's method
##                                       Sum Sq Mean Sq NumDF  DenDF F value
## communication                        0.01166 0.01166     1  38.13  0.0717
## difficulty_simple                    0.03585 0.01792     2 561.74  0.1102
## task                                 1.09716 1.09716     1 562.17  6.7472
## communication:difficulty_simple      0.03143 0.01571     2 561.74  0.0966
## communication:task                   0.00031 0.00031     1 562.17  0.0019
## difficulty_simple:task               0.01727 0.01727     1 561.68  0.1062
## communication:difficulty_simple:task 0.00540 0.00540     1 561.68  0.0332
##                                        Pr(>F)   
## communication                        0.790272   
## difficulty_simple                    0.895645   
## task                                 0.009635 **
## communication:difficulty_simple      0.907904   
## communication:task                   0.965375   
## difficulty_simple:task               0.744631   
## communication:difficulty_simple:task 0.855462   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## --- POST-HOC: Communication × Difficulty ---
## NOTE: Results may be misleading due to involvement in interactions
## difficulty_simple = Easy:
##  contrast        estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.02840939 0.1469563 45.39  -0.193  0.8476
## 
## difficulty_simple = Optimal:
##  contrast        estimate        SE    df t.ratio p.value
##  Chat - Jitsi      nonEst        NA    NA      NA      NA
## 
## difficulty_simple = Hard:
##  contrast        estimate        SE    df t.ratio p.value
##  Chat - Jitsi -0.02965812 0.1478058 46.44  -0.201  0.8418
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
## --- MARGINAL MEANS ---
##  communication difficulty_simple task   emmean        SE    df lower.CL
##  Chat          Easy              HP   4.557980 0.1105618 57.98 4.336665
##  Jitsi         Easy              HP   4.577778 0.1100416 56.93 4.357417
##  Chat          Optimal           HP     nonEst        NA    NA       NA
##  Jitsi         Optimal           HP     nonEst        NA    NA       NA
##  Chat          Hard              HP   4.569682 0.1110891 59.07 4.347399
##  Jitsi         Hard              HP   4.604631 0.1110992 59.09 4.382329
##  Chat          Easy              Math 4.661111 0.1100416 56.93 4.440750
##  Jitsi         Easy              Math 4.698132 0.1111138 59.11 4.475803
##  Chat          Optimal           Math 4.661111 0.1037019 45.03 4.452248
##  Jitsi         Optimal           Math 4.724991 0.1038387 45.26 4.515883
##  Chat          Hard              Math 4.661854 0.1119643 60.90 4.437960
##  Jitsi         Hard              Math 4.686221 0.1120046 60.98 4.462253
##  upper.CL
##  4.779295
##  4.798139
##        NA
##        NA
##  4.791965
##  4.826934
##  4.881472
##  4.920462
##  4.869974
##  4.934099
##  4.885748
##  4.910190
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95
# ================================================================================
# PART 4 (TEIL 4): SUMMARY OF SIGNIFICANT DIFFERENCES
# ================================================================================

# Use cat() instead of print(): print() shows the "\n\n" escape sequence
# literally (see the rendered output below the original call), whereas cat()
# renders the intended blank lines before the banner.
cat("\n\n=== ZUSAMMENFASSUNG SIGNIFIKANTER UNTERSCHIEDE ===\n")
## [1] "\n\n=== ZUSAMMENFASSUNG SIGNIFIKANTER UNTERSCHIEDE ==="
# Collect all post-hoc Chat-vs-Jitsi contrasts with p < .05 across mediators.
# Build one result frame per mediator and bind them once at the end instead of
# growing a data.frame inside a loop (repeated bind_rows copies the
# accumulator on every iteration).
significant_differences <- bind_rows(lapply(names(mediator_results), function(med) {
  result <- mediator_results[[med]]

  # Skip mediators for which no post-hoc contrasts were computed.
  if (is.null(result$posthoc_comm_diff)) {
    return(NULL)
  }

  result$posthoc_comm_diff %>%
    filter(p.value < 0.05) %>%  # rows with NA p-values (nonEst contrasts) drop out here too
    mutate(
      mediator = gsub("_score$", "", med),
      effect_direction = ifelse(estimate > 0, "Jitsi > Chat", "Chat > Jitsi"),
      effect_size = abs(estimate)
    )
}))

# Report the pooled post-hoc results: either an ordered table of the
# significant Chat-vs-Jitsi contrasts, or a note that none were found.
if (nrow(significant_differences) == 0) {
  print("Keine signifikanten Unterschiede zwischen Chat und Jitsi gefunden.")
} else {
  print("SIGNIFIKANTE CHAT vs JITSI UNTERSCHIEDE:")
  # Keep only the reporting columns, ordered by difficulty level and p-value.
  significant_summary <- significant_differences %>%
    select(mediator, difficulty_simple, effect_direction, estimate, p.value) %>%
    arrange(difficulty_simple, p.value)
  print(significant_summary)
}
## [1] "SIGNIFIKANTE CHAT vs JITSI UNTERSCHIEDE:"
##              mediator difficulty_simple effect_direction   estimate    p.value
## 1 information_sharing              Easy     Chat > Jitsi -0.6524979 0.01773990
## 2     synchronization              Easy     Chat > Jitsi -0.4662716 0.03515921
## 3 information_sharing              Hard     Chat > Jitsi -0.6808938 0.01405640
## 4     synchronization              Hard     Chat > Jitsi -0.4680119 0.03551231
# ================================================================================
# PART 5 (TEIL 5): SPECIFIC EASY vs HARD ANALYSES
# ================================================================================

# cat() renders the leading "\n" as a blank line; print() would show it
# literally as "\n" (as in the original rendered output).
cat("\n=== SPEZIFISCHE ANALYSEN: EASY vs HARD ===\n")
## [1] "\n=== SPEZIFISCHE ANALYSEN: EASY vs HARD ==="
# For each difficulty level, refit a simpler mixed model per mediator
# (communication + task as fixed effects, random intercepts for participant
# and team) and report only the significant Chat-vs-Jitsi effects together
# with the raw group means.
for (difficulty_level in c("Easy", "Hard")) {
  cat("\n", rep("=", 50), "\n", sep = "")
  cat("DIFFICULTY LEVEL:", difficulty_level, "\n")
  # FIX: sep = "" was missing here, so the rule printed as "= = = = ..."
  # (visible in the rendered output) instead of a solid "=====" line.
  cat(rep("=", 50), "\n", sep = "")

  difficulty_data <- mediator_analysis_data %>%
    filter(difficulty_simple == difficulty_level)

  print(paste("Daten für", difficulty_level, ":", nrow(difficulty_data), "Beobachtungen"))

  for (mediator in mediator_cols) {
    mediator_name <- gsub("_score$", "", mediator)

    # Drop observations missing on this mediator before fitting.
    analysis_data <- difficulty_data %>%
      filter(!is.na(!!sym(mediator)))

    # Too few observations to fit the mixed model reliably.
    if (nrow(analysis_data) < 10) next

    tryCatch({
      model <- lmer(as.formula(paste(mediator, "~ communication + task + (1|participant.code) + (1|team_id)")),
                   data = analysis_data)

      model_summary <- summary(model)
      # Row for the Jitsi-vs-Chat fixed effect (Chat is the reference level).
      communication_effect <- model_summary$coefficients["communicationJitsi", ]

      if (communication_effect["Pr(>|t|)"] < 0.05) {
        cat("\n*** SIGNIFIKANT:", mediator_name, "in", difficulty_level, "***\n")
        cat("Jitsi vs Chat Effekt:", round(communication_effect["Estimate"], 3),
            ", p =", round(communication_effect["Pr(>|t|)"], 4), "\n")

        direction <- ifelse(communication_effect["Estimate"] > 0, "Jitsi > Chat", "Chat > Jitsi")
        cat("Richtung:", direction, "\n")

        # Raw (model-unadjusted) means per communication condition for context.
        means_data <- analysis_data %>%
          group_by(communication) %>%
          summarise(mean_value = mean(!!sym(mediator), na.rm = TRUE), .groups = "drop")

        cat("Mittelwerte: Chat =", round(means_data$mean_value[means_data$communication == "Chat"], 3),
            ", Jitsi =", round(means_data$mean_value[means_data$communication == "Jitsi"], 3), "\n")
      }

    }, error = function(e) {
      # Deliberately silent: skip mediators whose model fails to fit.
      # NOTE(review): consider message(conditionMessage(e)) so failures
      # are not completely invisible in the rendered report.
    })
  }
}
## 
## ==================================================
## DIFFICULTY LEVEL: Easy 
## = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = 
## [1] "Daten für Easy : 234 Beobachtungen"
## 
## *** SIGNIFIKANT: information_sharing in Easy ***
## Jitsi vs Chat Effekt: 0.677 , p = 0.0029 
## Richtung: Jitsi > Chat 
## Mittelwerte: Chat = 5.189 , Jitsi = 5.88 
## 
## *** SIGNIFIKANT: synchronization in Easy ***
## Jitsi vs Chat Effekt: 0.484 , p = 0.0124 
## Richtung: Jitsi > Chat 
## Mittelwerte: Chat = 5.205 , Jitsi = 5.702
## boundary (singular) fit: see help('isSingular')
## boundary (singular) fit: see help('isSingular')
## Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv, :
## Model failed to converge with max|grad| = 0.00222798 (tol = 0.002, component 1)
## 
## ==================================================
## DIFFICULTY LEVEL: Hard 
## = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = 
## [1] "Daten für Hard : 218 Beobachtungen"
## 
## *** SIGNIFIKANT: stress in Hard ***
## Jitsi vs Chat Effekt: -0.427 , p = 0.0457 
## Richtung: Chat > Jitsi 
## Mittelwerte: Chat = 2.694 , Jitsi = 2.26 
## 
## *** SIGNIFIKANT: information_sharing in Hard ***
## Jitsi vs Chat Effekt: 0.665 , p = 0.0027 
## Richtung: Jitsi > Chat 
## Mittelwerte: Chat = 5.205 , Jitsi = 5.864 
## 
## *** SIGNIFIKANT: synchronization in Hard ***
## Jitsi vs Chat Effekt: 0.484 , p = 0.0108 
## Richtung: Jitsi > Chat 
## Mittelwerte: Chat = 5.21 , Jitsi = 5.707
## boundary (singular) fit: see help('isSingular')
## 
## *** SIGNIFIKANT: valence in Hard ***
## Jitsi vs Chat Effekt: 0.524 , p = 0.0197 
## Richtung: Jitsi > Chat 
## Mittelwerte: Chat = 6.11 , Jitsi = 6.609
## boundary (singular) fit: see help('isSingular')
# ================================================================================
# PART 6 (TEIL 6): VISUALIZATIONS
# ================================================================================

# cat() renders the leading "\n"; print() would display it literally.
cat("\n=== VISUALISIERUNGEN ===\n")
## [1] "\n=== VISUALISIERUNGEN ==="
# One interaction plot per mediator: estimated marginal means (+/- 1 SE) for
# Communication x Difficulty, faceted by task. Non-estimable cells (nonEst)
# carry NA and are dropped by ggplot with a warning (seen in the output).
for (mediator in names(mediator_results)) {
  result <- mediator_results[[mediator]]

  if (!is.null(result$marginal_means)) {
    mediator_name <- gsub("_score$", "", mediator)
    # Hoisted: the prettified label is used in both the title and the y-axis.
    pretty_name <- str_to_title(gsub("_", " ", mediator_name))
    # Hoisted: the same dodge is applied to lines, points, and error bars.
    dodge <- position_dodge(width = 0.1)

    plot_data <- result$marginal_means %>%
      mutate(communication = factor(communication, levels = c("Chat", "Jitsi")))

    p <- ggplot(plot_data, aes(x = difficulty_simple, y = emmean,
                              color = communication, group = communication)) +
      # `linewidth` replaces the deprecated `size` aesthetic for lines
      # (ggplot2 >= 3.4; this session runs 3.5.2).
      geom_line(linewidth = 1.2, alpha = 0.8, position = dodge) +
      geom_point(size = 3, position = dodge) +
      geom_errorbar(aes(ymin = emmean - SE, ymax = emmean + SE),
                    width = 0.1, alpha = 0.7, position = dodge) +
      facet_wrap(~ task) +
      scale_color_manual(values = c("Chat" = "#E31A1C", "Jitsi" = "#1F78B4")) +
      labs(
        title = paste("Mediator:", pretty_name),
        subtitle = "Communication × Difficulty by Task",
        x = "Difficulty Level",
        y = paste("Estimated", pretty_name),
        color = "Communication"
      ) +
      theme_minimal() +
      theme(
        plot.title = element_text(size = 14, face = "bold"),
        axis.title = element_text(size = 12),
        strip.text = element_text(size = 12, face = "bold")
      )

    print(p)
    # Intentional global assignment: keeps each plot (e.g. p_valence)
    # available in the global environment for later export.
    assign(paste0("p_", gsub("_", "", mediator_name)), p, envir = .GlobalEnv)
  }
}
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_point()`).

# ================================================================================
# TEIL 7: INTERPRETATION FÜR SHARED FLOW PARADOX
# ================================================================================

# Headline finding. cat() is used instead of print() so the "\n" is rendered
# as an actual newline rather than printed literally (the original print()
# output showed the raw characters: [1] "\n=== ...").
cat("\n=== INTERPRETATION FÜR SHARED FLOW PARADOX ===\n")
cat("\nSHARED FLOW BEFUND:\n")
cat("- Chat: Flow sinkt mit zunehmender Schwierigkeit\n")
cat("- Jitsi: Flow steigt mit zunehmender Schwierigkeit\n\n")
if(nrow(significant_differences) > 0) {
  # Split significant communication effects by difficulty level
  easy_effects <- significant_differences %>% filter(difficulty_simple == "Easy")
  hard_effects <- significant_differences %>% filter(difficulty_simple == "Hard")
  
  if(nrow(easy_effects) > 0) {
    cat("Bei LEICHTEN Aufgaben:\n")
    for(i in seq_len(nrow(easy_effects))) {
      cat("- ", easy_effects$mediator[i], ": ", easy_effects$effect_direction[i], 
          " (p = ", round(easy_effects$p.value[i], 3), ")\n", sep = "")
    }
  }
  
  if(nrow(hard_effects) > 0) {
    cat("\nBei SCHWEREN Aufgaben:\n")
    for(i in seq_len(nrow(hard_effects))) {
      cat("- ", hard_effects$mediator[i], ": ", hard_effects$effect_direction[i], 
          " (p = ", round(hard_effects$p.value[i], 3), ")\n", sep = "")
    }
  }
  
  # Identify mediators whose effect direction flips between Easy and Hard —
  # these are candidate explanations for the shared-flow paradox
  pattern_changes <- significant_differences %>%
    select(mediator, difficulty_simple, effect_direction) %>%
    pivot_wider(names_from = difficulty_simple, values_from = effect_direction) %>%
    filter(!is.na(Easy) & !is.na(Hard) & Easy != Hard)
  
  if(nrow(pattern_changes) > 0) {
    cat("\nMEDIATOREN MIT SICH ÄNDERNDEN MUSTERN:\n")
    for(i in seq_len(nrow(pattern_changes))) {
      cat("- ", pattern_changes$mediator[i], ": Easy (", pattern_changes$Easy[i], 
          ") vs Hard (", pattern_changes$Hard[i], ")\n", sep = "")
    }
    cat("\n*** DIESE KÖNNTEN DAS SHARED FLOW PARADOX ERKLÄREN! ***\n")
  }
  
} else {
  cat("Keine signifikanten Mediator-Unterschiede gefunden.\n")
}
## Bei LEICHTEN Aufgaben:
## - information_sharing: Chat > Jitsi (p = 0.018)
## - synchronization: Chat > Jitsi (p = 0.035)
## 
## Bei SCHWEREN Aufgaben:
## - information_sharing: Chat > Jitsi (p = 0.014)
## - synchronization: Chat > Jitsi (p = 0.036)
cat("\n=== ANALYSEN ABGESCHLOSSEN ===\n")

Mediation analysis in comparison with data from experiment 1

# Erweiterte Mediationsanalyse: Integration alter und neuer Datensätze
# Vergleich von SP, MP (ohne Kommunikation), MP+Chat und MP+Jitsi

library(dplyr)
library(tidyr)
library(lme4)
library(lmerTest)
library(rmcorr)
library(ggplot2)
library(psych)

# ================================================================================
# TEIL 1: ALTE DATEN KORREKT AUFBEREITEN
# ================================================================================

print("--- KORRIGIERE ALTE DATEN ---")
## [1] "--- KORRIGIERE ALTE DATEN ---"
# 1.1 Map an order string (e.g. "BOREDOM-AUTONOMY-FLOW-OVERLOAD") to a
# round -> difficulty lookup table.
#
# Returns a data.frame with columns `round`, `difficulty_code`, `difficulty`
# (one row per round, in presentation order), or NULL when the order string
# does not split into exactly 4 parts (incomplete experiment). Unknown codes
# map to NA, matching the original case_when() fall-through.
create_difficulty_mapping <- function(order_string) {
  # Split the order string into its parts
  order_parts <- strsplit(order_string, "-")[[1]]
  
  # Incomplete experiments (not exactly 4 rounds) are signalled with NULL
  if (length(order_parts) != 4) {
    return(NULL)
  }
  
  # Static code -> difficulty lookup; plain base R, no dplyr needed here
  code_to_difficulty <- c(
    BOREDOM  = "Easy",
    AUTONOMY = "Optimal_Selected",
    FLOW     = "Optimal_Calibrated",
    OVERLOAD = "Hard"
  )
  
  data.frame(
    round = seq_along(order_parts),
    difficulty_code = order_parts,
    difficulty = unname(code_to_difficulty[order_parts]),
    stringsAsFactors = FALSE
  )
}

# 1.2 Extended preparation + filtering in one step: rename old-experiment
# columns to the new naming scheme, derive comm/difficulty, and keep only
# participants with a complete 4-part order string.
old_rounds_corrected <- data_old_rounds %>%
  mutate(
    participant.code = SubjectID,
    team_id = SessionID,
    # Old treatments: MP = together without communication, SP = alone
    comm = case_when(
      Treatment == "MP" ~ "Together_None",
      Treatment == "SP" ~ "Alone",
      TRUE ~ NA_character_
    ),
    task = "Math",
    order = as.character(ConditionOrder),
    # Condition is coded like "<n>_..."; the leading number encodes difficulty
    condition_num = as.numeric(sub("_.*", "", Condition)),
    difficulty = case_when(
      condition_num == 1 ~ "Easy",
      condition_num == 2 ~ "Optimal_Calibrated",
      condition_num == 3 ~ "Optimal_Selected",
      condition_num == 4 ~ "Hard",
      TRUE ~ NA_character_
    ),
    # Round-wise mediators, renamed to the new variable names
    flow_score = flowFKS_9,
    stress_value = stress,
    individual_motivation_value = motivation,
    valence_value = valence,
    arousal_value = arousal,
    information_sharing_value = infSharing,
    synchronization_value = teamSynch
  ) %>%
  select(-condition_num) %>%
  # Keep only complete experiments: non-empty order with exactly 4 parts
  filter(
    !is.na(order) & order != "" &
    lengths(strsplit(order, "-")) == 4
  )

# 1.3 Derive the round number from the order string and condition (simplified).
# NOTE(review): dplyr::do() is superseded (group_modify() is the modern
# equivalent); behavior is kept as-is here.
old_rounds_with_rounds <- old_rounds_corrected %>%
  group_by(participant.code) %>%
  do({
    participant_data <- .
    order_string <- unique(participant_data$order)[1]
    
    difficulty_mapping <- create_difficulty_mapping(order_string)
    
    if(!is.null(difficulty_mapping)) {
      # Attach the round number to each row via its difficulty label
      participant_data %>%
        left_join(difficulty_mapping, by = "difficulty") %>%
        select(-difficulty_code)  # drop the temporary code column
    } else {
      # Should not happen: incomplete orders were already filtered out above
      participant_data %>% mutate(round = NA)
    }
  }) %>%
  ungroup()

print(paste("Alte Daten korrekt aufbereitet:", nrow(old_rounds_with_rounds), "Beobachtungen"))
## [1] "Alte Daten korrekt aufbereitet: 603 Beobachtungen"
# 1.4 Corrected team composition/motivation + TEAM-FACTOR integration (unchanged)
old_final_corrected <- data_old_final %>%
  filter(Condition == "PHASE") %>%
  transmute(
    participant.code = SubjectID,
    team_id = SessionID,
    # comm is kept but not used for the join (avoids .x/.y suffix problems)
    comm = case_when(
      Treatment == "MP" ~ "Together_None",
      Treatment == "SP" ~ "Alone",
      TRUE ~ NA_character_
    ),
    task = "Math",
    # Direct derivation from existing columns
    team_motivation_value = as.numeric(grpEffort),
    team_size_score = rowMeans(cbind(
      8 - grpSizeTooLarge,
      8 - grpSizeTooSmall,
      grpSizeJustRight
    ), na.rm = TRUE),
    team_composition_value = rowMeans(cbind(
      # team_size_score is a temporary column inside transmute, so it has to be
      # recomputed here rather than referenced:
      rowMeans(cbind(8 - grpSizeTooLarge, 8 - grpSizeTooSmall, grpSizeJustRight), na.rm = TRUE),
      grpDivers,
      grpSkill
    ), na.rm = TRUE),
    
    # ============================================================================
    # NEW: team factors from the available variables
    # ============================================================================
    # Interdependence: 3 items (int1 reverse-coded based on the alpha analysis)
    interdependence_value = interdepMutualDependenc,
    
    # Common Goal: 5 items available (cg1, cg3, cg5 reverse-coded per alpha analysis)
    common_goal_value = rowMeans(cbind(
      clearGoal,      # item 2 (corresponds to cg2)
      challGoal,          # item 3 (corresponds to cg3)
      8 - noChallGoal,    # item 4 reverse-coded (corresponds to cg4)
      8- noConseqGoal,       # item 5 reverse-coded (corresponds to cg5)
      conseqGoal      # item 6 (corresponds to cg6)
      # item 1 (cg1) is not available in the old data
    ), na.rm = TRUE),
    
    # Means for Coordination: 2 items (mc1 reverse-coded per alpha analysis)
    means_coordination_value = rowMeans(cbind(
      8 - coordDifficult,  # item 1 reverse-coded (corresponds to mc1)
      coordPossible        # item 2 (corresponds to mc2)
    ), na.rm = TRUE),
    
    # ============================================================================
    # EXTENDED team factors for the detailed analysis
    # ============================================================================
    
    # Group Size (from the three existing items - already correctly keyed)
    group_size_value = rowMeans(cbind(
      8 - grpSizeTooLarge,   # item 1: "group is not too large"
      8 - grpSizeTooSmall,   # item 2: "group is not too small"
      grpSizeJustRight       # item 3: "group size is just right"
    ), na.rm = TRUE),
    
    # Group Diversity (directly available, single item)
    group_diversity_value = grpDivers,
    
    # Group Skill (directly available, single item)
    group_skill_value = grpSkill,
    
    # Communication Required (directly available - corresponds to int2)
    communication_required_value = interdepCommRequired,
    
    # Work Independence (directly available - corresponds to int1)
    work_independence_value = interdepOwnJob,
    
    # Social Presence (directly available)
    social_presence_value = socPres,
    
    # Perceived Task Complexity (not available in the old data)
    perceived_task_complexity_value = NA_real_
  ) %>%
  select(participant.code, team_id, comm, task, team_composition_value, team_motivation_value,
         interdependence_value, common_goal_value, means_coordination_value,
         # extended team factors
         group_size_value, group_diversity_value, group_skill_value,
         communication_required_value, work_independence_value, social_presence_value, perceived_task_complexity_value)

# Normalize participant-code strings to a canonical form:
#   - lower-case; "-" -> "_"; collapse repeated "_"; strip leading/trailing "_"
#   - move a treatment token ("mp"/"sp") from the middle to the end, e.g.
#     "session_65_mp_abc" -> "session_65_abc_mp"
# Rewritten in plain base R: the original relied on the magrittr `.`
# placeholder, which breaks if the file ever switches to the native |> pipe
# or magrittr is not attached.
normalize_participant_code <- function(pc) {
  out <- tolower(pc)
  out <- gsub("-", "_", out)
  out <- gsub("_+", "_", out)
  out <- gsub("^_|_$", "", out)
  # Move a "_mp_"/"_sp_" treatment marker to the end of the code
  gsub("^(session_\\d+)_(mp|sp)_(.+)$", "\\1_\\3_\\2", out)
}

# Normalize team/session id strings:
#   - lower-case; drop a single trailing letter after the session number
#     (e.g. "session_65b" -> "session_65"); "-" -> "_"; drop a trailing "_na";
#     collapse repeated "_"; strip leading/trailing "_".
# Rewritten in plain base R (no magrittr `.` placeholder), same step order.
normalize_team_id <- function(tid) {
  out <- tolower(tid)
  out <- gsub("session_(\\d+)[a-z]", "session_\\1", out)  # session_65b -> session_65
  out <- gsub("-", "_", out)
  out <- gsub("_na$", "", out)
  out <- gsub("_+", "_", out)
  gsub("^_|_$", "", out)
}

# Normalize ids in the old round-level data so they match the new data
old_rounds_with_rounds <- old_rounds_with_rounds %>%
  mutate(
    participant.code = normalize_participant_code(participant.code),
    team_id = normalize_team_id(team_id)
  )

# Normalize the old team-level data; rebuild team_id as "<session>_<treatment>"
# parsed from the (already normalized) participant code
old_final_corrected <- old_final_corrected %>%
  mutate(
    participant.code = normalize_participant_code(participant.code),
    session_num = sub("^(session_\\d+)_.*", "\\1", participant.code),
    treatment = sub(".*_(mp|sp)$", "\\1", participant.code),
    # sub() returns its input unchanged when the pattern does not match,
    # so anything other than "mp"/"sp" is turned into NA here
    treatment = ifelse(treatment %in% c("mp", "sp"), treatment, NA_character_),
    team_id = paste0(session_num, "_", treatment)
  ) %>%
  dplyr::select(-session_num, -treatment)

# ============================================================================
# NEW: POST-ROUND STRUCTURE FOR THE OLD DATA
# ============================================================================

# Normal rounds: only the round-wise mediators are populated. The team factors
# (collected once after the experiment) are set to NA here and carried by the
# "Post" rows instead.
old_rounds_normal <- old_rounds_with_rounds %>%
  select(participant.code, team_id, comm, task, difficulty, order, round,
         flow_score, stress_value, individual_motivation_value, 
         valence_value, arousal_value, information_sharing_value, 
         synchronization_value) %>%
  # Team factors are NA for normal rounds
  mutate(
    team_composition_value = NA_real_,
    team_motivation_value = NA_real_,
    interdependence_value = NA_real_,
    common_goal_value = NA_real_,
    means_coordination_value = NA_real_,
    # extended team factors also NA
    group_size_value = NA_real_,
    group_diversity_value = NA_real_,
    group_skill_value = NA_real_,
    communication_required_value = NA_real_,
    work_independence_value = NA_real_,
    social_presence_value = NA_real_,
    perceived_task_complexity_value = NA_real_
  )

# Post rounds: one row per participant x team x task carrying only the
# once-collected team factors; all round-wise mediators are NA.
old_rounds_post <- old_rounds_with_rounds %>%
  distinct(participant.code, team_id, comm, task, order) %>%
  left_join(old_final_corrected %>% select(-comm), 
            by = c("participant.code", "team_id", "task")) %>%
  mutate(
    round = "Post",
    difficulty = "Post",
    # round-wise mediators set to NA
    flow_score = NA_real_,
    stress_value = NA_real_,
    individual_motivation_value = NA_real_,
    valence_value = NA_real_,
    arousal_value = NA_real_,
    information_sharing_value = NA_real_,
    synchronization_value = NA_real_
  ) %>%
  select(participant.code, team_id, comm, task, difficulty, order, round,
         flow_score, stress_value, individual_motivation_value, 
         valence_value, arousal_value, information_sharing_value, 
         synchronization_value, team_composition_value, team_motivation_value,
         interdependence_value, common_goal_value, means_coordination_value,
         # extended team factors
         group_size_value, group_diversity_value, group_skill_value,
         communication_required_value, work_independence_value, social_presence_value, perceived_task_complexity_value)

# Combine old normal + post rounds (round coerced to character for bind_rows)
old_rounds_final <- bind_rows(
  old_rounds_normal %>% mutate(round = as.character(round)),
  old_rounds_post
)

message("Rows in old_rounds_final (mit Post-Runden):", nrow(old_rounds_final))
## Rows in old_rounds_final (mit Post-Runden):754
# Quick check: how many NAs remain in the team variables after the join
message("NAs in team_composition_value after join: ", sum(is.na(old_rounds_final$team_composition_value)))
## NAs in team_composition_value after join: 641
message("NAs in team_motivation_value after join: ", sum(is.na(old_rounds_final$team_motivation_value)))
## NAs in team_motivation_value after join: 645
message("NAs in interdependence_value after join: ", sum(is.na(old_rounds_final$interdependence_value)))
## NAs in interdependence_value after join: 648
message("NAs in common_goal_value after join: ", sum(is.na(old_rounds_final$common_goal_value)))
## NAs in common_goal_value after join: 641
message("NAs in means_coordination_value after join: ", sum(is.na(old_rounds_final$means_coordination_value)))
## NAs in means_coordination_value after join: 641
# ================================================================================
# FOR THE NEW DATA: team factors with correct reverse-coding + POST ROUNDS
# ================================================================================

# 2.1 Check which difficulty levels are present in flow_clean
print("Verfügbare Difficulties in flow_clean:")
## [1] "Verfügbare Difficulties in flow_clean:"
difficulty_check <- flow_clean %>%
  filter(comm %in% c("Chat", "Jitsi")) %>%
  group_by(difficulty, task) %>%
  dplyr::summarise(n = n(), .groups = "drop")
print(difficulty_check)
## # A tibble: 7 × 3
##   difficulty         task      n
##   <chr>              <chr> <int>
## 1 Easy               HP      118
## 2 Easy               Math    116
## 3 Hard               HP      112
## 4 Hard               Math    106
## 5 Medium             HP      115
## 6 Optimal_Calibrated Math    119
## 7 Optimal_Selected   Math    119
# 2.2 Build order strings for the new data.
# NOTE(review): arrange() sorts only by the grouping keys, so within each
# participant x task x comm group the original row order is preserved (stable
# sort) — `round` is effectively the input row order. Confirm this matches the
# intended presentation order.
create_order_string_new <- function(participant_data) {
  # Sort by a deterministic key (based on row order in the original data set)
  ordered_data <- participant_data %>%
    arrange(participant.code, task, comm) %>%
    group_by(participant.code, task, comm) %>%
    mutate(round = row_number()) %>%
    ungroup()
  
  # Build the order string: difficulties in round order, Python-list style
  order_summary <- ordered_data %>%
    group_by(participant.code, task, comm) %>%
    dplyr::summarise(
      order = paste0("['", paste(difficulty, collapse = "', '"), "']"),
      .groups = "drop"
    )
  
  return(list(data_with_rounds = ordered_data, order_summary = order_summary))
}

# 2.3 Apply to the new data: assign a round number per participant x task x
# comm group (effectively the input row order; arrange() uses only group keys)
new_data_processing <- flow_clean %>%
  filter(comm %in% c("Chat", "Jitsi")) %>%
  group_by(participant.code, task, comm) %>%
  group_split() %>%
  map_dfr(~{
    group_data <- .x
    
    # Add round numbers
    group_data %>%
      arrange(participant.code, task, comm) %>%
      mutate(round = row_number())
  })

# 2.4 Order summary: difficulties per group as a Python-style list string
order_summary_new <- new_data_processing %>%
  group_by(participant.code, task, comm) %>%
  dplyr::summarise(
    order = paste0("['", paste(difficulty, collapse = "', '"), "']"),
    .groups = "drop"
  )

# 2.5 Extract mediators for the new data (corrected)
print("Extrahiere Mediatoren für neue Daten...")
## [1] "Extrahiere Mediatoren für neue Daten..."
# Extract a mediator from the wide oTree export, for Math + HP tasks.
#   data            wide data frame, one column per <app>.<round>.player.<item>
#   var_pattern     regex fragment for the item name(s), e.g. "is[1-5]"
#   var_name        output prefix; the result column is <var_name>_value
#   last_round_only TRUE for one-off (post) mediators taken from the final
#                   round only (Math round 6, HP round 3); FALSE for
#                   round-wise mediators (Math rounds 3-6 -> 1-4, HP 1-3)
# Returns one row per participant x task x comm (x round), items averaged.
extract_mediators_new <- function(data, var_pattern, var_name, last_round_only = FALSE) {
  if(last_round_only) {
    # One-off mediators: last round per task
    patterns <- c(
      paste0("mathJitsi\\.6\\.player\\.", var_pattern, "$"),
      paste0("mathChat\\.6\\.player\\.", var_pattern, "$"),
      paste0("HiddenProfile_Jitsi\\.3\\.player\\.", var_pattern, "$"),
      paste0("HiddenProfile_Chat\\.3\\.player\\.", var_pattern, "$")
    )
  } else {
    # Round-wise mediators: all relevant rounds
    patterns <- c(
      paste0("(mathJitsi|mathChat)\\.[3-6]\\.player\\.", var_pattern, "$"),
      paste0("(HiddenProfile_Jitsi|HiddenProfile_Chat)\\.[1-3]\\.player\\.", var_pattern, "$")
    )
  }
  
  combined_pattern <- paste(patterns, collapse = "|")
  
  # Wide -> long, then derive task/comm/round from the column name
  med_data <- data %>%
    dplyr::select(participant.code, matches(combined_pattern)) %>%
    pivot_longer(cols = -participant.code, names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(
      task = case_when(
        grepl("^math", variable) ~ "Math",
        grepl("^HiddenProfile", variable) ~ "HP"
      ),
      comm = case_when(
        grepl("Jitsi", variable) ~ "Jitsi",
        grepl("Chat", variable) ~ "Chat"
      ),
      round_raw = as.numeric(gsub(".*\\.(\\d+)\\.player.*", "\\1", variable))
    )
  
  if(!last_round_only) {
    # Round-wise: map raw round numbers onto a standardized 1-based scale
    med_data <- med_data %>%
      mutate(
        round = case_when(
          task == "Math" ~ round_raw - 2,  # Math: 3-6 -> 1-4
          task == "HP" ~ round_raw         # HP: 1-3 -> 1-3
        )
      ) %>%
      filter((task == "Math" & round_raw >= 3 & round_raw <= 6) |
             (task == "HP" & round_raw >= 1 & round_raw <= 3))
  }
  
  # Aggregate across items when the pattern matched more than one
  if(last_round_only) {
    result <- med_data %>%
      group_by(participant.code, task, comm) %>%
      summarise(!!paste0(var_name, "_value") := mean(value, na.rm = TRUE), .groups = "drop")
  } else {
    result <- med_data %>%
      group_by(participant.code, task, comm, round) %>%
      summarise(!!paste0(var_name, "_value") := mean(value, na.rm = TRUE), .groups = "drop")
  }
  
  return(result)
}

# Extract all round-wise mediators (one value per participant/task/comm/round)
stress_new <- extract_mediators_new(data, "is[1-5]", "stress")
info_sharing_new <- extract_mediators_new(data, "info[1-2]", "information_sharing") 
sync_new <- extract_mediators_new(data, "ec1", "synchronization")
arousal_new <- extract_mediators_new(data, "arousal", "arousal")
valence_new <- extract_mediators_new(data, "pleasure", "valence")
motivation_new <- extract_mediators_new(data, "tm[1-3]", "individual_motivation")

# One-off (post) mediators, taken from the last round only
team_comp_new <- extract_mediators_new(data, "(tsz[1-3]|td[1-3]|tsc[1-3])", "team_composition", last_round_only = TRUE)
team_motiv_new <- extract_mediators_new(data, "te[1-3]", "team_motivation", last_round_only = TRUE)

# Team factors with correct reverse-coding
# Interdependence: only item int3 is selected below, so no reverse-coding is
# needed. (The original code reverse-coded "int1", but the selection regex
# only ever matches int3, so that branch was dead code and its comment was
# misleading; it has been removed without changing the result.)
interdependence_new <- data %>%
  dplyr::select(participant.code, matches("(mathJitsi|mathChat)\\.6\\.player\\.int[3]$|HiddenProfile_(Jitsi|Chat)\\.3\\.player\\.int[3]$")) %>%
  pivot_longer(cols = -participant.code, names_to = "variable", values_to = "value") %>%
  filter(!is.na(value)) %>%
  mutate(
    # Derive task and communication channel from the wide column name
    task = case_when(
      grepl("^math", variable) ~ "Math",
      grepl("^HiddenProfile", variable) ~ "HP"
    ),
    comm = case_when(
      grepl("Jitsi", variable) ~ "Jitsi",
      grepl("Chat", variable) ~ "Chat"
    )
  ) %>%
  group_by(participant.code, task, comm) %>%
  summarise(interdependence_value = mean(value, na.rm = TRUE), .groups = "drop")

# Common Goal: items cg1, cg3, cg5 are reverse-coded (per the alpha analysis)
common_goal_new <- data %>%
  dplyr::select(participant.code, matches("(mathJitsi|mathChat)\\.6\\.player\\.cg[1-6]$|HiddenProfile_(Jitsi|Chat)\\.3\\.player\\.cg[1-6]$")) %>%
  pivot_longer(cols = -participant.code, names_to = "variable", values_to = "value") %>%
  filter(!is.na(value)) %>%
  mutate(
    # Derive task and communication channel from the wide column name
    task = case_when(
      grepl("^math", variable) ~ "Math",
      grepl("^HiddenProfile", variable) ~ "HP"
    ),
    comm = case_when(
      grepl("Jitsi", variable) ~ "Jitsi",
      grepl("Chat", variable) ~ "Chat"
    ),
    item = gsub(".*\\.(cg\\d+)$", "\\1", variable),
    # Reverse-code cg1, cg3, cg5 on the 1-7 scale (8 - x)
    value_corrected = ifelse(item %in% c("cg1", "cg3", "cg5"), 8 - value, value)
  ) %>%
  group_by(participant.code, task, comm) %>%
  summarise(common_goal_value = mean(value_corrected, na.rm = TRUE), .groups = "drop")

# Means for Coordination: item mc1 is reverse-coded
means_coordination_new <- data %>%
  dplyr::select(participant.code, matches("(mathJitsi|mathChat)\\.6\\.player\\.mc[1-2]$|HiddenProfile_(Jitsi|Chat)\\.3\\.player\\.mc[1-2]$")) %>%
  pivot_longer(cols = -participant.code, names_to = "variable", values_to = "value") %>%
  filter(!is.na(value)) %>%
  mutate(
    # Derive task and communication channel from the wide column name
    task = case_when(
      grepl("^math", variable) ~ "Math",
      grepl("^HiddenProfile", variable) ~ "HP"
    ),
    comm = case_when(
      grepl("Jitsi", variable) ~ "Jitsi",
      grepl("Chat", variable) ~ "Chat"
    ),
    item = gsub(".*\\.(mc\\d+)$", "\\1", variable),
    # Reverse-code mc1 on the 1-7 scale (8 - x)
    value_corrected = ifelse(item == "mc1", 8 - value, value)
  ) %>%
  group_by(participant.code, task, comm) %>%
  summarise(means_coordination_value = mean(value_corrected, na.rm = TRUE), .groups = "drop")

# ================================================================================
# EXTENDED TEAM FACTORS FOR THE NEW DATA
# ================================================================================

print("Extrahiere erweiterte Team-Faktoren für neue Daten...")
## [1] "Extrahiere erweiterte Team-Faktoren für neue Daten..."
# Helper: extract a one-off (post-round) team factor from the wide oTree
# export. The seven pipelines below were byte-identical copies; they are
# factored into this single function without changing any result.
#   data         wide data frame, one column per <app>.<round>.player.<item>
#   item_pattern regex fragment for the item name(s), e.g. "tsz[1-3]"
#   var_name     output prefix; the result column is <var_name>_value
# Items are taken from the last Math round (6) / last HP round (3); task and
# comm are derived from the column name; items are averaged per
# participant x task x comm. None of the items handled here are reverse-coded.
extract_team_factor_new <- function(data, item_pattern, var_name) {
  selection_pattern <- paste0(
    "(mathJitsi|mathChat)\\.6\\.player\\.", item_pattern,
    "$|HiddenProfile_(Jitsi|Chat)\\.3\\.player\\.", item_pattern, "$"
  )
  
  data %>%
    dplyr::select(participant.code, matches(selection_pattern)) %>%
    pivot_longer(cols = -participant.code, names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(
      task = case_when(
        grepl("^math", variable) ~ "Math",
        grepl("^HiddenProfile", variable) ~ "HP"
      ),
      comm = case_when(
        grepl("Jitsi", variable) ~ "Jitsi",
        grepl("Chat", variable) ~ "Chat"
      )
    ) %>%
    group_by(participant.code, task, comm) %>%
    summarise(!!paste0(var_name, "_value") := mean(value, na.rm = TRUE), .groups = "drop")
}

# Group Size: 3 items (tsz1-3), already correctly keyed
group_size_new <- extract_team_factor_new(data, "tsz[1-3]", "group_size")

# Group Diversity: 3 items (td1-3)
group_diversity_new <- extract_team_factor_new(data, "td[1-3]", "group_diversity")

# Group Skill: 3 items (tsc1-3)
group_skill_new <- extract_team_factor_new(data, "tsc[1-3]", "group_skill")

# Communication Required: single item (int2)
communication_required_new <- extract_team_factor_new(data, "int2", "communication_required")

# Work Independence: single item (int1)
work_independence_new <- extract_team_factor_new(data, "int1", "work_independence")

# Social Presence: 5 items (psp1-5)
social_presence_new <- extract_team_factor_new(data, "psp[1-5]", "social_presence")

# Perceived Task Complexity: 4 items (ptc1-4)
perceived_task_complexity_new <- extract_team_factor_new(data, "ptc[1-4]", "perceived_task_complexity")

# ============================================================================
# NEW: POST-ROUND STRUCTURE FOR THE NEW DATA
# ============================================================================

# Drop the pre-existing order column so the joins below do not create .x/.y
new_data_processing <- new_data_processing %>% select(-order)
# NOTE(review): joined_test looks like a leftover sanity-check object; it does
# not appear to be used further below.
joined_test <- new_data_processing %>%
  left_join(order_summary_new, by = c("participant.code", "task", "comm"))

# Normal rounds: join all round-wise mediators; team factors stay NA here
new_rounds_normal <- new_data_processing %>%
  left_join(order_summary_new, by = c("participant.code", "task", "comm")) %>%
  left_join(stress_new, by = c("participant.code", "task", "comm", "round")) %>%
  left_join(info_sharing_new, by = c("participant.code", "task", "comm", "round")) %>%
  left_join(sync_new, by = c("participant.code", "task", "comm", "round")) %>%
  left_join(arousal_new, by = c("participant.code", "task", "comm", "round")) %>%
  left_join(valence_new, by = c("participant.code", "task", "comm", "round")) %>%
  left_join(motivation_new, by = c("participant.code", "task", "comm", "round")) %>%
  mutate(
    # Relabel comm to match the integrated condition scheme
    comm = case_when(
      comm == "Jitsi" ~ "Together_Jitsi",
      comm == "Chat" ~ "Together_Chat",
      TRUE ~ comm
    )
  ) %>%
  select(participant.code, team_id, comm, task, difficulty, round, order,
         flow_score, stress_value, individual_motivation_value, 
         valence_value, arousal_value, information_sharing_value, 
         synchronization_value) %>%
  # Team factors are NA for normal rounds
  mutate(
    team_composition_value = NA_real_,
    team_motivation_value = NA_real_,
    interdependence_value = NA_real_,
    common_goal_value = NA_real_,
    means_coordination_value = NA_real_
  )

# Post rounds (team factors only): start from one row per
# participant x task x comm and fold every team-factor summary table in
# over the shared join key. reduce() applies the joins in list order,
# exactly like the original chained left_join() sequence.
new_rounds_post <- list(
  order_summary_new, team_comp_new, team_motiv_new,
  interdependence_new, common_goal_new, means_coordination_new,
  # extended team factors
  group_size_new, group_diversity_new, group_skill_new,
  communication_required_new, work_independence_new,
  social_presence_new, perceived_task_complexity_new
) %>%
  reduce(
    left_join,
    by = c("participant.code", "task", "comm"),
    .init = new_data_processing %>%
      distinct(participant.code, team_id, task, comm)
  ) %>%
  mutate(
    # Harmonize condition labels with the old dataset
    comm = case_when(
      comm == "Jitsi" ~ "Together_Jitsi",
      comm == "Chat" ~ "Together_Chat",
      TRUE ~ comm
    ),
    round = "Post",
    difficulty = "Post",
    # Round-wise mediators were not collected post-hoc -> explicitly NA
    flow_score = NA_real_,
    stress_value = NA_real_,
    individual_motivation_value = NA_real_,
    valence_value = NA_real_,
    arousal_value = NA_real_,
    information_sharing_value = NA_real_,
    synchronization_value = NA_real_
  ) %>%
  select(participant.code, team_id, comm, task, difficulty, round, order,
         flow_score, stress_value, individual_motivation_value,
         valence_value, arousal_value, information_sharing_value,
         synchronization_value, team_composition_value, team_motivation_value,
         interdependence_value, common_goal_value, means_coordination_value,
         # extended team factors
         group_size_value, group_diversity_value, group_skill_value,
         communication_required_value, work_independence_value,
         social_presence_value, perceived_task_complexity_value)

# Combine the regular new-data rounds with the Post rounds. `round` is
# coerced to character first so the row-bind does not hit a type clash
# (numeric round indices vs. the literal "Post").
new_rounds_final <- new_rounds_normal %>%
  mutate(round = as.character(round)) %>%
  bind_rows(new_rounds_post)

print(paste("Neue Daten korrekt aufbereitet (mit Post-Runden):", nrow(new_rounds_final), "Beobachtungen"))
## [1] "Neue Daten korrekt aufbereitet (mit Post-Runden): 1045 Beobachtungen"
# ================================================================================
# FINAL INTEGRATION WITH POST-ROUND STRUCTURE
# ================================================================================

print("--- FINALE INTEGRATION MIT POST-RUNDEN ---")
## [1] "--- FINALE INTEGRATION MIT POST-RUNDEN ---"
# Combine the corrected datasets (old study + new study incl. Post rounds)
integrated_data_full <- bind_rows(
  old_rounds_final,
  new_rounds_final
) %>%
  mutate(
    # Fixed level order: "Alone" becomes the reference category downstream
    comm = factor(comm, levels = c("Alone", "Together_None", "Together_Chat", "Together_Jitsi"))
  )

# ============================================================================
# NEW DATA FILTERS FOR THE EXISTING ANALYSES
# ============================================================================

# For the existing analyses: regular rounds only (Math task), flow present,
# round coerced to numeric
integrated_data <- integrated_data_full %>% 
  filter(task == "Math", round != "Post", !is.na(flow_score)) %>%
  mutate(round = as.numeric(round))

# For team-factor analyses: Post rounds only
integrated_data_team_factors <- integrated_data_full %>%
  filter(task == "Math", round == "Post")

print(paste("Integrierte Daten (normale Runden):", nrow(integrated_data), "Beobachtungen"))
## [1] "Integrierte Daten (normale Runden): 1039 Beobachtungen"
print(paste("Team-Faktoren Daten (Post-Runden):", nrow(integrated_data_team_factors), "Beobachtungen"))
## [1] "Team-Faktoren Daten (Post-Runden): 271 Beobachtungen"
print("✅ Post-Runden Integration erfolgreich - deine bestehenden Analysen funktionieren weiter!")
## [1] "✅ Post-Runden Integration erfolgreich - deine bestehenden Analysen funktionieren weiter!"
# ================================================================================
# FINAL INTEGRATION WITH POST-ROUND STRUCTURE
# NOTE(review): this section duplicates the one directly above and OVERWRITES
# `integrated_data` with a weaker filter — Post rounds and rows with NA
# flow_score are kept and `round` stays character (1334 vs. 1039 obs). All
# downstream analyses therefore run on the looser dataset; this also explains
# the "Removed 295 rows" ggplot warning later. Confirm which definition is
# intended and remove the other.
# ================================================================================

print("--- FINALE INTEGRATION MIT POST-RUNDEN ---")
## [1] "--- FINALE INTEGRATION MIT POST-RUNDEN ---"
# Combine the corrected datasets (identical to the block above)
integrated_data_full <- bind_rows(
  old_rounds_final,
  new_rounds_final
) %>%
  mutate(
    comm = factor(comm, levels = c("Alone", "Together_None", "Together_Chat", "Together_Jitsi"))
  )

integrated_data <- integrated_data_full %>% filter(task == "Math")

integrated_data_team_factors <- integrated_data_full %>%
  filter(task == "Math", round == "Post")

print(paste("Integrierte Daten:", nrow(integrated_data), "Beobachtungen"))
## [1] "Integrierte Daten: 1334 Beobachtungen"
# ================================================================================
# PART 4: DESCRIPTIVE STATISTICS
# ================================================================================

print("=== DESKRIPTIVE STATISTIKEN DES INTEGRIERTEN DATENSATZES ===\n")
## [1] "=== DESKRIPTIVE STATISTIKEN DES INTEGRIERTEN DATENSATZES ===\n"
# Sample sizes per communication condition: distinct participants, distinct
# teams, and raw observation (row) counts
sample_overview <- integrated_data %>%
  group_by(comm) %>%
  summarise(
    n_participants = n_distinct(participant.code),
    n_teams = n_distinct(team_id),
    n_observations = n(),
    .groups = "drop"
  )

print("Stichprobengrößen nach Kommunikationsbedingung:")
## [1] "Stichprobengrößen nach Kommunikationsbedingung:"
print(sample_overview)
## # A tibble: 4 × 4
##   comm           n_participants n_teams n_observations
##   <fct>                   <int>   <int>          <int>
## 1 Alone                      38      37            190
## 2 Together_None             113      39            564
## 3 Together_Chat              60      20            293
## 4 Together_Jitsi             60      20            287
# Mean and SD of flow scores per communication condition
# (na.rm = TRUE because flow_score is NA for Post rounds in this dataset)
flow_by_comm <- integrated_data %>%
  group_by(comm) %>%
  summarise(
    flow_mean = mean(flow_score, na.rm = TRUE),
    flow_sd = sd(flow_score, na.rm = TRUE),
    .groups = "drop"
  )

print("\nFlow Scores nach Kommunikationsbedingung:")
## [1] "\nFlow Scores nach Kommunikationsbedingung:"
print(flow_by_comm)
## # A tibble: 4 × 3
##   comm           flow_mean flow_sd
##   <fct>              <dbl>   <dbl>
## 1 Alone               4.76   1.10 
## 2 Together_None       4.62   0.893
## 3 Together_Chat       5.28   1.02 
## 4 Together_Jitsi      5.55   0.992
# ================================================================================
# PART 5: REPEATED MEASURES CORRELATIONS
# ================================================================================

print("\n\n=== REPEATED MEASURES CORRELATIONS ===\n")
## [1] "\n\n=== REPEATED MEASURES CORRELATIONS ===\n"
# Round-wise mediator columns; all share the "_value" suffix
round_mediators <- paste0(
  c("stress", "individual_motivation", "valence", "arousal",
    "information_sharing", "synchronization"),
  "_value"
)

# Repeated-measures correlation (mediator vs. flow_score) computed separately
# per communication condition.
#
# Args:
#   data:          data frame with columns comm (factor), participant.code,
#                  flow_score, and the mediator column named below.
#   mediator_name: character scalar — name of the mediator column.
#
# Returns: a named list with one entry per condition that had enough data
#   (> 10 complete rows and > 3 distinct participants); each entry holds
#   r, p, df, CI and the number of complete observations. Conditions with
#   insufficient data are silently omitted.
perform_rmcorr_extended <- function(data, mediator_name) {
  results <- list()
  
  for(comm_type in levels(data$comm)) {
    comm_data <- data %>% 
      filter(comm == comm_type) %>%
      # .data[[ ]] is the tidy-eval-safe way to reference a column by string
      # (avoids the environment-lookup pitfalls of get() inside dplyr verbs)
      filter(!is.na(.data[[mediator_name]]) & !is.na(flow_score)) %>%
      # pre-convert to factor so rmcorr() does not emit a coercion warning
      # on every call
      mutate(participant.code = factor(participant.code))
    
    # && (scalar, short-circuiting) instead of the vectorized & in an if()
    if(nrow(comm_data) > 10 && n_distinct(comm_data$participant.code) > 3) {
      rmcorr_result <- rmcorr(
        participant = participant.code,
        measure1 = get(mediator_name),
        measure2 = flow_score,
        dataset = comm_data
      )
      
      results[[comm_type]] <- list(
        r = rmcorr_result$r,
        p = rmcorr_result$p,
        df = rmcorr_result$df,
        CI = rmcorr_result$CI,
        n = nrow(comm_data)
      )
    }
  }
  
  results
}

# Run the rmcorr analysis for every round-wise mediator and print one compact
# summary line per condition (r, p, n, 95% CI)
rmcorr_results <- list()

for(mediator in round_mediators) {
  cat("\n", gsub("_value", "", mediator), ":\n", sep = "")
  results <- perform_rmcorr_extended(integrated_data, mediator)
  rmcorr_results[[mediator]] <- results
  
  # `results` only contains conditions that met the minimum-data criteria,
  # so conditions without data (e.g. Alone for team-level mediators) are
  # skipped automatically
  for(comm_type in names(results)) {
    if(!is.null(results[[comm_type]])) {
      cat("  ", comm_type, ": r = ", round(results[[comm_type]]$r, 3),
          ", p = ", format.pval(results[[comm_type]]$p, digits = 3),
          ", n = ", results[[comm_type]]$n,
          ", 95% CI [", round(results[[comm_type]]$CI[1], 3), 
          ", ", round(results[[comm_type]]$CI[2], 3), "]\n", sep = "")
    }
  }
}
## 
## stress:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Alone: r = -0.358, p = 0.000144, n = 145, 95% CI [-0.512, -0.181]
##   Together_None: r = -0.467, p = <2e-16, n = 423, 95% CI [-0.55, -0.376]
##   Together_Chat: r = 0.038, p = 0.62, n = 233, 95% CI [-0.112, 0.186]
##   Together_Jitsi: r = -0.023, p = 0.771, n = 227, 95% CI [-0.173, 0.129]
## 
## individual_motivation:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Alone: r = 0.395, p = 2.37e-05, n = 145, 95% CI [0.222, 0.543]
##   Together_None: r = 0.194, p = 0.000656, n = 414, 95% CI [0.084, 0.3]
##   Together_Chat: r = 0.064, p = 0.402, n = 233, 95% CI [-0.086, 0.211]
##   Together_Jitsi: r = -0.045, p = 0.566, n = 227, 95% CI [-0.195, 0.107]
## 
## valence:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Alone: r = 0.683, p = 5.36e-16, n = 144, 95% CI [0.567, 0.773]
##   Together_None: r = 0.628, p = <2e-16, n = 421, 95% CI [0.555, 0.691]
##   Together_Chat: r = 0.046, p = 0.546, n = 233, 95% CI [-0.103, 0.194]
##   Together_Jitsi: r = -0.001, p = 0.986, n = 227, 95% CI [-0.153, 0.15]
## 
## arousal:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Alone: r = -0.167, p = 0.082, n = 147, 95% CI [-0.343, 0.021]
##   Together_None: r = -0.217, p = 0.000131, n = 418, 95% CI [-0.321, -0.107]
##   Together_Chat: r = 0.162, p = 0.0327, n = 233, 95% CI [0.014, 0.303]
##   Together_Jitsi: r = 0.09, p = 0.247, n = 227, 95% CI [-0.062, 0.238]
## 
## information_sharing:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Together_None: r = 0.294, p = 2.32e-07, n = 410, 95% CI [0.186, 0.394]
##   Together_Chat: r = 0.036, p = 0.638, n = 233, 95% CI [-0.113, 0.184]
##   Together_Jitsi: r = -0.122, p = 0.115, n = 227, 95% CI [-0.269, 0.03]
## 
## synchronization:
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
## Warning in rmcorr(participant = participant.code, measure1 =
## get(mediator_name), : 'participant.code' coerced into a factor
##   Together_None: r = 0.345, p = 6.7e-10, n = 414, 95% CI [0.242, 0.441]
##   Together_Chat: r = -0.14, p = 0.0662, n = 233, 95% CI [-0.283, 0.009]
##   Together_Jitsi: r = 0.008, p = 0.922, n = 227, 95% CI [-0.144, 0.159]
# ================================================================================
# PART 6: LINEAR MIXED MODELS
# ================================================================================

print("\n\n=== LINEAR MIXED MODELS ===\n")
## [1] "\n\n=== LINEAR MIXED MODELS ===\n"
# 6.1 Models for the round-wise mediators. For each mediator two models are
# fitted, each with random intercepts for participant and round; lmer drops
# rows with NA in any model variable listwise.
print("--- RUNDENWEISE MEDIATOREN ---\n")
## [1] "--- RUNDENWEISE MEDIATOREN ---\n"
for(mediator in round_mediators) {
  mediator_name <- gsub("_value", "", mediator)
  cat("\n", toupper(mediator_name), ":\n", sep = "")
  
  # Model 1: communication condition -> mediator (a-path)
  formula1 <- as.formula(paste(mediator, "~ comm + (1|participant.code) + (1|round)"))
  model1 <- lmer(formula1, data = integrated_data)
  
  cat("Kommunikation -> ", mediator_name, ":\n", sep = "")
  print(summary(model1)$coefficients)
  
  # Model 2: mediator -> flow (b-path)
  formula2 <- as.formula(paste("flow_score ~", mediator, "+ (1|participant.code) + (1|round)"))
  model2 <- lmer(formula2, data = integrated_data)
  
  cat("\n", mediator_name, " -> Flow:\n", sep = "")
  # only the mediator's slope row is of interest here
  print(summary(model2)$coefficients[2,])
  cat("\n")
}
## 
## STRESS:
## boundary (singular) fit: see help('isSingular')
## Kommunikation -> stress:
##                      Estimate Std. Error       df   t value     Pr(>|t|)
## (Intercept)         3.3492803  0.1700653 262.9980 19.694085 1.131917e-53
## commTogether_None  -0.2408439  0.1966483 263.0524 -1.224745 2.217670e-01
## commTogether_Chat  -0.4722002  0.2173869 263.1804 -2.172165 3.073613e-02
## commTogether_Jitsi -0.8914419  0.2179213 264.9560 -4.090661 5.711746e-05
## 
## stress -> Flow:
##      Estimate    Std. Error            df       t value      Pr(>|t|) 
## -2.009000e-01  2.171200e-02  1.021343e+03 -9.252950e+00  1.244108e-19 
## 
## 
## INDIVIDUAL_MOTIVATION:
## Kommunikation -> individual_motivation:
##                     Estimate Std. Error        df   t value     Pr(>|t|)
## (Intercept)        4.5649116  0.1860782  42.79669 24.532215 7.813856e-27
## commTogether_None  0.2832976  0.1905031 252.69807  1.487103 1.382344e-01
## commTogether_Chat  0.5875707  0.2102160 252.22837  2.795081 5.587547e-03
## commTogether_Jitsi 0.6735310  0.2107475 253.99350  3.195914 1.570326e-03
## 
## individual_motivation -> Flow:
##     Estimate   Std. Error           df      t value     Pr(>|t|) 
## 2.049453e-01 2.263325e-02 1.010876e+03 9.055056e+00 6.874457e-19 
## 
## 
## VALENCE:
## boundary (singular) fit: see help('isSingular')
## Kommunikation -> valence:
##                       Estimate Std. Error       df    t value     Pr(>|t|)
## (Intercept)         5.50118866  0.2154331 257.7492 25.5354818 1.499989e-72
## commTogether_None  -0.02966357  0.2489565 257.4574 -0.1191516 9.052482e-01
## commTogether_Chat   0.06853397  0.2750876 257.4647  0.2491351 8.034549e-01
## commTogether_Jitsi  0.60412850  0.2759626 259.4905  2.1891680 2.947573e-02
## 
## valence -> Flow:
##     Estimate   Std. Error           df      t value     Pr(>|t|) 
## 1.963101e-01 1.428473e-02 1.002115e+03 1.374265e+01 1.686780e-39 
## 
## 
## AROUSAL:
## Kommunikation -> arousal:
##                      Estimate Std. Error        df     t value     Pr(>|t|)
## (Intercept)        3.59070559  0.2806077  69.56356 12.79617683 5.945508e-20
## commTogether_None  0.02371405  0.2971981 260.61039  0.07979207 9.364639e-01
## commTogether_Chat  0.80263663  0.3285765 260.65665  2.44276918 1.523956e-02
## commTogether_Jitsi 1.03306402  0.3292612 262.24899  3.13752172 1.898231e-03
## 
## arousal -> Flow:
##      Estimate    Std. Error            df       t value      Pr(>|t|) 
##   -0.02761345    0.01654481 1009.09919605   -1.66901040    0.09542551 
## 
## 
## INFORMATION_SHARING:
## Kommunikation -> information_sharing:
##                      Estimate Std. Error       df    t value     Pr(>|t|)
## (Intercept)        4.66216445  0.1383211 170.1981 33.7053813 3.140417e-77
## commTogether_Chat  0.09988886  0.2318199 220.8219  0.4308898 6.669688e-01
## commTogether_Jitsi 0.91555497  0.2322610 222.2515  3.9419226 1.083085e-04
## 
## information_sharing -> Flow:
##     Estimate   Std. Error           df      t value     Pr(>|t|) 
## 1.661910e-01 2.318798e-02 7.284020e+02 7.167117e+00 1.881926e-12 
## 
## 
## SYNCHRONIZATION:
## Kommunikation -> synchronization:
##                     Estimate Std. Error        df   t value     Pr(>|t|)
## (Intercept)        4.9773286  0.1023528  51.43014 48.629143 1.135510e-44
## commTogether_Chat  0.2367206  0.1693853 219.29791  1.397528 1.636671e-01
## commTogether_Jitsi 0.6593522  0.1702065 222.06680  3.873838 1.409930e-04
## 
## synchronization -> Flow:
##     Estimate   Std. Error           df      t value     Pr(>|t|) 
## 1.436782e-01 2.080326e-02 8.585057e+02 6.906524e+00 9.665485e-12
# 6.2 Models for one-off mediators (team factors measured once, post-hoc).
# These are plain lm() fits because there is only one measurement per
# participant x condition.
print("\n--- EINMALIGE MEDIATOREN (ANGEPASST) ---\n")
## [1] "\n--- EINMALIGE MEDIATOREN (ANGEPASST) ---\n"
# Team Composition
print("\nTEAM COMPOSITION:")
## [1] "\nTEAM COMPOSITION:"
# Uses the Post-round dataset (integrated_data_team_factors)
tc_data <- integrated_data_team_factors %>%
  select(participant.code, comm, team_composition_value) %>%
  filter(!is.na(team_composition_value))

model_tc1 <- lm(team_composition_value ~ comm, data = tc_data)
print("Kommunikation -> Team Composition:")
## [1] "Kommunikation -> Team Composition:"
print(summary(model_tc1)$coefficients)
##                     Estimate Std. Error   t value      Pr(>|t|)
## (Intercept)         5.511308 0.06388258  86.27247 3.591333e-177
## commTogether_Chat  -1.865011 0.10847505 -17.19300  3.694401e-43
## commTogether_Jitsi -1.942789 0.10847505 -17.91001  1.674633e-45
# Mediator -> Flow: average flow per participant x condition joined with the
# Post-round team factor (team factors exist only once per person/condition)
tc_flow <- integrated_data %>%
  group_by(participant.code, comm) %>%
  summarise(
    mean_flow = mean(flow_score, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  # join with team factors from the Post rounds
  left_join(
    integrated_data_team_factors %>% 
      select(participant.code, comm, team_composition_value),
    by = c("participant.code", "comm")
  ) %>%
  filter(!is.na(team_composition_value))

model_tc2 <- lm(mean_flow ~ team_composition_value, data = tc_flow)
print("\nTeam Composition -> Flow:")
## [1] "\nTeam Composition -> Flow:"
print(summary(model_tc2)$coefficients[2,])
##      Estimate    Std. Error       t value      Pr(>|t|) 
## -2.274713e-01  4.364636e-02 -5.211691e+00  4.159856e-07
# Team Motivation — same two-step structure as Team Composition above:
# (a) condition -> team factor on the Post-round data, (b) team factor ->
# participant-level mean flow.
print("\n\nTEAM MOTIVATION:")
## [1] "\n\nTEAM MOTIVATION:"
# Uses the Post-round dataset (integrated_data_team_factors)
tm_data <- integrated_data_team_factors %>%
  select(participant.code, comm, team_motivation_value) %>%
  filter(!is.na(team_motivation_value))

model_tm1 <- lm(team_motivation_value ~ comm, data = tm_data)
print("Kommunikation -> Team Motivation:")
## [1] "Kommunikation -> Team Motivation:"
print(summary(model_tm1)$coefficients)
##                      Estimate Std. Error   t value      Pr(>|t|)
## (Intercept)         5.3730887 0.09391412 57.212790 1.919135e-136
## commTogether_Chat  -0.7119776 0.15761539 -4.517183  1.009758e-05
## commTogether_Jitsi -0.6453109 0.15761539 -4.094213  5.899837e-05
# Mediator -> Flow analysis
tm_flow <- integrated_data %>%
  group_by(participant.code, comm) %>%
  summarise(
    mean_flow = mean(flow_score, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  # join with team factors from the Post rounds
  left_join(
    integrated_data_team_factors %>% 
      select(participant.code, comm, team_motivation_value),
    by = c("participant.code", "comm")
  ) %>%
  filter(!is.na(team_motivation_value))

model_tm2 <- lm(mean_flow ~ team_motivation_value, data = tm_flow)
print("\nTeam Motivation -> Flow:")
## [1] "\nTeam Motivation -> Flow:"
print(summary(model_tm2)$coefficients[2,])
##    Estimate  Std. Error     t value    Pr(>|t|) 
## -0.01147998  0.05221199 -0.21987248  0.82616894
# ================================================================================
# PART 7: SUMMARY TABLES AND VISUALISATIONS
# ================================================================================

# 7.1 Overview table of effects.
# Placeholder: collecting and tabulating the coefficients from all fitted
# models is still to be implemented; callers currently receive NULL.
create_summary_table_extended <- function() {
  NULL
}

# 7.2 Visualisations
print("\n\n=== VISUALISIERUNGEN ===\n")
## [1] "\n\n=== VISUALISIERUNGEN ===\n"
# Flow by communication condition, with fixed manual colours per condition
# NOTE(review): the "Removed 295 rows" warning below stems from NA flow_score
# rows (Post rounds) that remain in `integrated_data` after its second,
# weaker redefinition — presumably unintended; verify which definition of
# `integrated_data` should feed this plot.
p1 <- ggplot(integrated_data, aes(x = comm, y = flow_score, fill = comm)) +
  geom_boxplot(alpha = 0.7) +
  scale_fill_manual(values = c("Alone" = "#E69F00", 
                              "Together_None" = "#56B4E9", 
                              "Together_Chat" = "#009E73", 
                              "Together_Jitsi" = "#F0E442")) +
  labs(title = "Flow Scores by Communication Condition",
       x = "Communication Condition",
       y = "Flow Score",
       fill = "Condition") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

print(p1)
## Warning: Removed 295 rows containing non-finite outside the scale range
## (`stat_boxplot()`).

# Mediator profiles per condition: condition-level means of all round-wise
# mediators, reshaped to long format for a grouped bar chart
mediator_profiles <- integrated_data %>%
  group_by(comm) %>%
  summarise(
    Stress = mean(stress_value, na.rm = TRUE),
    `Individual Motivation` = mean(individual_motivation_value, na.rm = TRUE),
    Valence = mean(valence_value, na.rm = TRUE),
    Arousal = mean(arousal_value, na.rm = TRUE),
    `Information Sharing` = mean(information_sharing_value, na.rm = TRUE),
    Synchronization = mean(synchronization_value, na.rm = TRUE),
    .groups = "drop"
  ) %>%
  pivot_longer(cols = -comm, names_to = "Mediator", values_to = "Score")

# NOTE(review): the "Removed 2 rows" warning below is presumably the
# Information Sharing / Synchronization cells for the Alone condition
# (NaN when all values are NA) — confirm against the data.
p2 <- ggplot(mediator_profiles, aes(x = Mediator, y = Score, fill = comm)) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_fill_manual(values = c("Alone" = "#E69F00", 
                              "Together_None" = "#56B4E9", 
                              "Together_Chat" = "#009E73", 
                              "Together_Jitsi" = "#F0E442")) +
  labs(title = "Mediator Profiles by Communication Condition",
       x = "Mediator",
       y = "Mean Score",
       fill = "Condition") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

print(p2)
## Warning: Removed 2 rows containing missing values or values outside the scale range
## (`geom_bar()`).

# ================================================================================
# PART 8: POST-HOC ANALYSES
# ================================================================================

print("\n\n=== POST-HOC ANALYSEN ===\n")
## [1] "\n\n=== POST-HOC ANALYSEN ===\n"
# Planned contrasts as dummy-coded predictors
print("Geplante Kontraste:")
## [1] "Geplante Kontraste:"
# Contrast codes: alone_vs_together (0 = Alone, 1 = any Together),
# no_comm_vs_comm (0 = no communication channel, 1 = Chat/Jitsi),
# chat_vs_jitsi (0 = Chat, 1 = Jitsi, NA otherwise — the NA makes the
# later filter on contrast 3 belt-and-braces)
integrated_data_contrast <- integrated_data %>%
  mutate(
    alone_vs_together = ifelse(comm == "Alone", 0, 1),
    no_comm_vs_comm = case_when(
      comm %in% c("Alone", "Together_None") ~ 0,
      comm %in% c("Together_Chat", "Together_Jitsi") ~ 1
    ),
    chat_vs_jitsi = case_when(
      comm == "Together_Chat" ~ 0,
      comm == "Together_Jitsi" ~ 1,
      TRUE ~ NA_real_
    )
  )

# Contrast 1: Alone vs. Together
contrast1 <- lmer(flow_score ~ alone_vs_together + (1|participant.code) + (1|round), 
                 data = integrated_data_contrast)
print("\n1. Alone vs. Together (alle):")
## [1] "\n1. Alone vs. Together (alle):"
print(summary(contrast1)$coefficients)
##                    Estimate Std. Error        df   t value     Pr(>|t|)
## (Intercept)       4.7523680  0.1536488  25.14914 30.930063 1.549000e-21
## alone_vs_together 0.2680148  0.1375529 260.38018  1.948449 5.243546e-02
# Contrast 2: no communication vs. communication
contrast2 <- lmer(flow_score ~ no_comm_vs_comm + (1|participant.code) + (1|round), 
                 data = integrated_data_contrast)
print("\n2. Keine Kommunikation vs. Kommunikation:")
## [1] "\n2. Keine Kommunikation vs. Kommunikation:"
print(summary(contrast2)$coefficients)
##                  Estimate Std. Error        df   t value     Pr(>|t|)
## (Intercept)     4.6462495 0.10234351   5.31603 45.398574 4.325213e-08
## no_comm_vs_comm 0.7612769 0.08507463 260.37455  8.948343 6.954731e-17
# Contrast 3: Chat vs. Jitsi (restricted to the two communication conditions)
contrast3 <- lmer(flow_score ~ chat_vs_jitsi + (1|participant.code) + (1|round), 
                 data = integrated_data_contrast %>% 
                   filter(comm %in% c("Together_Chat", "Together_Jitsi")))
print("\n3. Chat vs. Jitsi:")
## [1] "\n3. Chat vs. Jitsi:"
print(summary(contrast3)$coefficients)
##                Estimate Std. Error         df   t value     Pr(>|t|)
## (Intercept)   5.2840691  0.2124996   4.738438 24.866257 3.329623e-06
## chat_vs_jitsi 0.2517171  0.1426779 116.583138  1.764233 8.031187e-02
# ================================================================================
# CLOSING NOTES
# NOTE(review): print() renders "\n" literally (see output below); use cat()
# if actual blank lines are desired in the report.
# ================================================================================

print("\n\n=== ANALYSEN ABGESCHLOSSEN ===")
## [1] "\n\n=== ANALYSEN ABGESCHLOSSEN ==="
print("1. Datensätze erfolgreich integriert")
## [1] "1. Datensätze erfolgreich integriert"
print("2. Repeated Measures Korrelationen für alle Bedingungen berechnet")
## [1] "2. Repeated Measures Korrelationen für alle Bedingungen berechnet"
print("3. Linear Mixed Models für erweiterten Datensatz erstellt")
## [1] "3. Linear Mixed Models für erweiterten Datensatz erstellt"
print("4. Visualisierungen der Unterschiede zwischen allen vier Bedingungen")
## [1] "4. Visualisierungen der Unterschiede zwischen allen vier Bedingungen"
print("5. Post-hoc Kontraste zur Untersuchung spezifischer Hypothesen")
## [1] "5. Post-hoc Kontraste zur Untersuchung spezifischer Hypothesen"
# Save the integrated dataset for further analyses (kept disabled)
# write.csv(integrated_data, "integrated_mediation_data.csv", row.names = FALSE)

Structural differences between the communication treatments

# ================================================================================
# STRUKTURELLE UNTERSCHIEDE ZWISCHEN KOMMUNIKATIONSFORMEN
# Umfassende Analyse aller Einflussfaktoren
# ================================================================================

library(dplyr)
library(tidyr)
library(ggplot2)
library(lme4)
library(lmerTest)
library(car)
library(emmeans)
library(effectsize)
library(corrplot)
library(RColorBrewer)
library(gridExtra)
## Warning: Paket 'gridExtra' wurde unter R Version 4.2.3 erstellt
## 
## Attache Paket: 'gridExtra'
## Das folgende Objekt ist maskiert 'package:dplyr':
## 
##     combine
library(broom)
library(broom.mixed)
library(ggpubr)
## 
## Attache Paket: 'ggpubr'
## Das folgende Objekt ist maskiert 'package:plyr':
## 
##     mutate
library(scales)
## Warning: Paket 'scales' wurde unter R Version 4.2.3 erstellt
## 
## Attache Paket: 'scales'
## Die folgenden Objekte sind maskiert von 'package:psych':
## 
##     alpha, rescale
## Das folgende Objekt ist maskiert 'package:purrr':
## 
##     discard
## Das folgende Objekt ist maskiert 'package:readr':
## 
##     col_factor
# ================================================================================
# STEP 1: DATA PREPARATION AND OVERVIEW
# ================================================================================

print("=== SCHRITT 1: ANGEPASSTE DATENAUFBEREITUNG ===")
## [1] "=== SCHRITT 1: ANGEPASSTE DATENAUFBEREITUNG ==="
# Two separate variable lists: round-wise mediators vs. one-off team factors.
# All mediator columns share the "_value" suffix except flow_score.
rundenweise_vars <- c(
  "flow_score",
  paste0(c("stress", "individual_motivation", "valence", "arousal",
           "information_sharing", "synchronization"), "_value")
)

team_factor_vars <- paste0(
  c("team_composition", "team_motivation", "interdependence",
    "common_goal", "means_coordination",
    # extended team factors
    "group_size", "group_diversity", "group_skill",
    "communication_required", "work_independence", "social_presence",
    "perceived_task_complexity"),
  "_value"
)

# Combined list for overall analyses
all_vars <- c(rundenweise_vars, team_factor_vars)

# Prepare both analysis datasets
# (a) round-wise analyses
analysis_data_rounds <- integrated_data %>%
  mutate(
    comm = factor(comm, levels = c("Alone", "Together_None", "Together_Chat", "Together_Jitsi")),
    difficulty = factor(difficulty),
    participant.code = factor(participant.code)
  ) %>%
  filter(rowSums(is.na(select(., all_of(rundenweise_vars)))) <= 3)  # keep rows with at most 3 of the 7 round-wise vars missing (~43%, not 50%)

# (b) team-factor analyses
analysis_data_team <- integrated_data_team_factors %>%
  mutate(
    comm = factor(comm, levels = c("Alone", "Together_None", "Together_Chat", "Together_Jitsi")),
    participant.code = factor(participant.code)
  ) %>%
  filter(rowSums(is.na(select(., all_of(team_factor_vars)))) <= 5)  # keep rows with at most 5 of the 12 team-factor vars missing (~42%, not 50%)

print(paste("Rundenweise Analysedaten:", nrow(analysis_data_rounds), "Beobachtungen"))
## [1] "Rundenweise Analysedaten: 1058 Beobachtungen"
print(paste("Team-Faktoren Analysedaten:", nrow(analysis_data_team), "Beobachtungen"))
## [1] "Team-Faktoren Analysedaten: 233 Beobachtungen"
# ================================================================================
# STEP 2: DESCRIPTIVE STATISTICS
# ================================================================================

# Separate descriptive tables for round-wise and team-factor variables;
# across() produces mean/sd/valid-n columns named "<var>_<stat>"

# Round-wise variables
descriptive_stats_rounds <- analysis_data_rounds %>%
  group_by(comm) %>%
  summarise(
    across(all_of(rundenweise_vars), 
           list(
             mean = ~ mean(.x, na.rm = TRUE),
             sd = ~ sd(.x, na.rm = TRUE),
             n = ~ sum(!is.na(.x))
           ),
           .names = "{.col}_{.fn}"),
    .groups = "drop"
  )

# Team factors
descriptive_stats_team <- analysis_data_team %>%
  group_by(comm) %>%
  summarise(
    across(all_of(team_factor_vars), 
           list(
             mean = ~ mean(.x, na.rm = TRUE),
             sd = ~ sd(.x, na.rm = TRUE),
             n = ~ sum(!is.na(.x))
           ),
           .names = "{.col}_{.fn}"),
    .groups = "drop"
  )

print("Deskriptive Statistiken - Rundenweise Variablen:")
## [1] "Deskriptive Statistiken - Rundenweise Variablen:"
print(descriptive_stats_rounds)
## # A tibble: 4 × 22
##   comm           flow_score_mean flow_score_sd flow_score_n stress_value_mean
##   <fct>                    <dbl>         <dbl>        <int>             <dbl>
## 1 Alone                     4.77         1.10           147              3.32
## 2 Together_None             4.62         0.893          431              3.10
## 3 Together_Chat             5.28         1.02           233              2.87
## 4 Together_Jitsi            5.55         0.992          227              2.43
## # ℹ 17 more variables: stress_value_sd <dbl>, stress_value_n <int>,
## #   individual_motivation_value_mean <dbl>,
## #   individual_motivation_value_sd <dbl>, individual_motivation_value_n <int>,
## #   valence_value_mean <dbl>, valence_value_sd <dbl>, valence_value_n <int>,
## #   arousal_value_mean <dbl>, arousal_value_sd <dbl>, arousal_value_n <int>,
## #   information_sharing_value_mean <dbl>, information_sharing_value_sd <dbl>,
## #   information_sharing_value_n <int>, synchronization_value_mean <dbl>, …
# Team-factor descriptives: only the three "Together" conditions appear here
# (the output shows 3 rows), since team factors are absent for Alone
print("\nDeskriptive Statistiken - Team-Faktoren:")
## [1] "\nDeskriptive Statistiken - Team-Faktoren:"
print(descriptive_stats_team)
## # A tibble: 3 × 37
##   comm      team_composition_val…¹ team_composition_val…² team_composition_val…³
##   <fct>                      <dbl>                  <dbl>                  <int>
## 1 Together…                   5.51                  0.811                    113
## 2 Together…                   3.65                  0.520                     60
## 3 Together…                   3.57                  0.529                     60
## # ℹ abbreviated names: ¹​team_composition_value_mean,
## #   ²​team_composition_value_sd, ³​team_composition_value_n
## # ℹ 33 more variables: team_motivation_value_mean <dbl>,
## #   team_motivation_value_sd <dbl>, team_motivation_value_n <int>,
## #   interdependence_value_mean <dbl>, interdependence_value_sd <dbl>,
## #   interdependence_value_n <int>, common_goal_value_mean <dbl>,
## #   common_goal_value_sd <dbl>, common_goal_value_n <int>, …
# ================================================================================
# STEP 3: MIXED-EFFECTS ANOVA FOR EACH VARIABLE
# ================================================================================

print("\n=== SCHRITT 3: ANGEPASSTE ANOVA-ANALYSEN ===")
## [1] "\n=== SCHRITT 3: ANGEPASSTE ANOVA-ANALYSEN ==="
# Separate ANOVA helpers for round-wise vs. team-factor variables

# Helper for round-wise variables (mixed-effects with a random intercept per participant)
# Mixed-effects ANOVA for one round-level variable: communication condition
# (`comm`) as fixed effect, participant as random intercept to account for
# the repeated measurements per person.
#
# @param var_name Name of the dependent variable (string; a column of `data`).
# @param data Data frame with columns `comm`, `participant.code`, and `var_name`.
# @return A list with model, anova, eta_squared, significant (plus emmeans and
#   pairwise comparisons when the omnibus test is significant), or NULL when
#   there are not enough data or the model fails.
analyze_round_variable <- function(var_name, data) {
  cat("\n", paste(rep("=", 60), collapse=""), "\n")
  cat("RUNDENWEISE ANALYSE FÜR:", toupper(var_name), "\n")
  cat(paste(rep("=", 60), collapse=""), "\n")
  
  # Complete cases only; drop unused factor levels so contrasts are built
  # from the conditions actually observed for this variable.
  var_data <- data %>%
    filter(!is.na(!!sym(var_name))) %>%
    mutate(comm = droplevels(comm))
  
  comm_counts <- var_data %>%
    group_by(comm) %>%
    summarise(n = n(), .groups = "drop")
  
  # Require at least two conditions with a minimum of 3 observations each.
  if(nrow(comm_counts) >= 2 && all(comm_counts$n >= 3)) {
    tryCatch({
      # Random intercept per participant handles the repeated measures.
      model <- lmer(as.formula(paste(var_name, "~ comm + (1|participant.code)")), 
                    data = var_data)
      
      anova_result <- anova(model)
      print(anova_result)
      
      # Partial eta squared derived from F and its degrees of freedom.
      f_stat <- anova_result$`F value`[1]
      df1 <- anova_result$NumDF[1] 
      df2 <- anova_result$DenDF[1]
      eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
      
      cat("Partielle η² =", round(eta_squared, 3), "\n")
      
      # Bonferroni-adjusted post-hoc comparisons after a significant omnibus test.
      if(anova_result$`Pr(>F)`[1] < 0.05) {
        cat("\n🔍 SIGNIFIKANTER EFFEKT!\n")
        emm <- emmeans(model, "comm")
        pairwise <- pairs(emm, adjust = "bonferroni")
        
        # BUG FIX: print() on these header strings showed them quoted with a
        # literal "\n" (visible in the knitted output); cat() prints them cleanly.
        cat("Estimated Marginal Means:\n")
        print(emm)
        cat("\nPaarweise Vergleiche:\n")
        print(pairwise)
        
        return(list(model = model, anova = anova_result, emmeans = emm, 
                   pairwise = pairwise, eta_squared = eta_squared, significant = TRUE))
      }
      
      return(list(model = model, anova = anova_result, eta_squared = eta_squared, significant = FALSE))
      
    }, error = function(e) {
      cat("❌ Fehler bei Mixed-Effects Modell:", e$message, "\n")
      return(NULL)
    })
  } else {
    cat("⚠️ Nicht genügend Daten\n")
    return(NULL)
  }
}

# Funktion für Team-Faktoren (einfache ANOVA)
# One-way between-subjects ANOVA for a team factor (one rating per person),
# with communication condition (`comm`) as the single factor.
#
# @param var_name Name of the dependent variable (string; a column of `data`).
# @param data Data frame with columns `comm` and `var_name`.
# @return A list with model, anova, eta_squared, significant (plus Tukey HSD
#   post-hoc results when significant), or NULL when there are not enough data
#   or the model errors out.
analyze_team_variable <- function(var_name, data) {
  cat("\n", paste(rep("=", 60), collapse=""), "\n")
  cat("TEAM-FAKTOREN ANALYSE FÜR:", toupper(var_name), "\n")
  cat(paste(rep("=", 60), collapse=""), "\n")
  
  # Complete cases only; drop unused factor levels before fitting.
  var_data <- data %>%
    filter(!is.na(!!sym(var_name))) %>%
    mutate(comm = droplevels(comm))
  
  comm_counts <- var_data %>%
    group_by(comm) %>%
    summarise(n = n(), .groups = "drop")
  
  # Require at least two conditions with a minimum of 3 observations each.
  if(nrow(comm_counts) >= 2 && all(comm_counts$n >= 3)) {
    tryCatch({
      # Simple ANOVA (only one measurement per person, no random effect needed).
      model <- aov(as.formula(paste(var_name, "~ comm")), data = var_data)
      
      anova_result <- summary(model)
      print(anova_result)
      
      # Partial eta squared derived from F and its degrees of freedom.
      if(length(anova_result) > 0 && nrow(anova_result[[1]]) > 0) {
        f_stat <- anova_result[[1]]$`F value`[1]
        df1 <- anova_result[[1]]$Df[1]
        df2 <- anova_result[[1]]$Df[2]
        
        if(!is.na(f_stat) && !is.na(df1) && !is.na(df2)) {
          eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
          cat("Partielle η² =", round(eta_squared, 3), "\n")
          
          # Tukey HSD post-hoc comparisons after a significant omnibus test.
          if(anova_result[[1]]$`Pr(>F)`[1] < 0.05) {
            cat("\n🔍 SIGNIFIKANTER EFFEKT!\n")
            posthoc <- TukeyHSD(model)
            print(posthoc)
            
            return(list(model = model, anova = anova_result, posthoc = posthoc, 
                       eta_squared = eta_squared, significant = TRUE))
          }
          
          return(list(model = model, anova = anova_result, eta_squared = eta_squared, significant = FALSE))
        }
      }
      
      # BUG FIX: the original fell through silently (returning NULL) when the
      # ANOVA table was empty or contained NA statistics. Report the situation
      # and still hand back the fitted model for inspection; `significant =
      # FALSE` keeps the result compatible with the downstream summaries.
      cat("⚠️ Teststatistik konnte nicht berechnet werden\n")
      return(list(model = model, anova = anova_result, significant = FALSE))
      
    }, error = function(e) {
      cat("❌ Fehler bei ANOVA:", e$message, "\n")
      return(NULL)
    })
  } else {
    cat("⚠️ Nicht genügend Daten\n")
    return(NULL)
  }
}

# ÄNDERUNG 5: Führe getrennte Analysen durch

# Analyze every round-level variable and keep only the analyses that
# produced a result (NULL entries are dropped).
round_results <- Filter(
  Negate(is.null),
  setNames(
    lapply(rundenweise_vars, analyze_round_variable, data = analysis_data_rounds),
    rundenweise_vars
  )
)
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: FLOW_SCORE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## comm 57.561  19.187     3 258.26   29.08 3.121e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.253 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean     SE  df lower.CL upper.CL
##  Alone            4.76 0.1110 264     4.54     4.98
##  Together_None    4.61 0.0649 265     4.48     4.74
##  Together_Chat    5.28 0.0884 263     5.11     5.45
##  Together_Jitsi   5.54 0.0891 268     5.36     5.71
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Alone - Together_None             0.154 0.129 264   1.191  1.0000
##  Alone - Together_Chat            -0.517 0.142 263  -3.641  0.0020
##  Alone - Together_Jitsi           -0.776 0.143 265  -5.443  <.0001
##  Together_None - Together_Chat    -0.671 0.110 264  -6.116  <.0001
##  Together_None - Together_Jitsi   -0.929 0.110 267  -8.430  <.0001
##  Together_Chat - Together_Jitsi   -0.258 0.126 266  -2.059  0.2430
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: STRESS_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## comm  24.59  8.1966     3 265.05  6.9489 0.0001614 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.073 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean     SE  df lower.CL upper.CL
##  Alone            3.32 0.1700 267     2.99     3.66
##  Together_None    3.11 0.0986 265     2.92     3.31
##  Together_Chat    2.88 0.1350 265     2.61     3.14
##  Together_Jitsi   2.46 0.1360 269     2.19     2.73
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Alone - Together_None             0.212 0.197 266   1.078  1.0000
##  Alone - Together_Chat             0.446 0.217 266   2.052  0.2470
##  Alone - Together_Jitsi            0.865 0.218 268   3.971  0.0006
##  Together_None - Together_Chat     0.234 0.167 265   1.398  0.9789
##  Together_None - Together_Jitsi    0.653 0.168 268   3.888  0.0008
##  Together_Chat - Together_Jitsi    0.419 0.192 267   2.186  0.1780
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: INDIVIDUAL_MOTIVATION_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value   Pr(>F)   
## comm 15.758  5.2527     3 255.98  4.5785 0.003836 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.051 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean     SE  df lower.CL upper.CL
##  Alone            4.58 0.1640 264     4.26     4.90
##  Together_None    4.85 0.0954 266     4.66     5.04
##  Together_Chat    5.16 0.1300 263     4.90     5.41
##  Together_Jitsi   5.25 0.1310 267     4.99     5.50
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Alone - Together_None           -0.2694 0.189 264  -1.422  0.9364
##  Alone - Together_Chat           -0.5743 0.209 263  -2.750  0.0382
##  Alone - Together_Jitsi          -0.6652 0.209 265  -3.177  0.0100
##  Together_None - Together_Chat   -0.3049 0.161 264  -1.894  0.3563
##  Together_None - Together_Jitsi  -0.3958 0.162 267  -2.447  0.0902
##  Together_Chat - Together_Jitsi  -0.0909 0.184 265  -0.494  1.0000
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: VALENCE_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value  Pr(>F)  
## comm 27.203  9.0675     3 258.85  3.2122 0.02355 *
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.036 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean    SE  df lower.CL upper.CL
##  Alone            5.51 0.216 267     5.08     5.93
##  Together_None    5.47 0.125 264     5.23     5.72
##  Together_Chat    5.57 0.171 264     5.23     5.91
##  Together_Jitsi   6.11 0.173 270     5.77     6.45
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Alone - Together_None            0.0360 0.249 266   0.144  1.0000
##  Alone - Together_Chat           -0.0622 0.276 266  -0.226  1.0000
##  Alone - Together_Jitsi          -0.5978 0.276 268  -2.163  0.1886
##  Together_None - Together_Chat   -0.0982 0.212 264  -0.464  1.0000
##  Together_None - Together_Jitsi  -0.6338 0.213 268  -2.976  0.0191
##  Together_Chat - Together_Jitsi  -0.5356 0.243 267  -2.204  0.1705
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: AROUSAL_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## comm 47.921  15.974     3 262.84  7.4444 8.386e-05 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.078 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean    SE  df lower.CL upper.CL
##  Alone            3.59 0.257 265     3.08     4.09
##  Together_None    3.62 0.149 266     3.32     3.91
##  Together_Chat    4.40 0.205 265     3.99     4.80
##  Together_Jitsi   4.63 0.206 269     4.23     5.04
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Alone - Together_None           -0.0321 0.298 265  -0.108  1.0000
##  Alone - Together_Chat           -0.8114 0.329 265  -2.466  0.0857
##  Alone - Together_Jitsi          -1.0476 0.330 267  -3.178  0.0100
##  Together_None - Together_Chat   -0.7793 0.254 266  -3.073  0.0140
##  Together_None - Together_Jitsi  -1.0155 0.254 268  -3.991  0.0005
##  Together_Chat - Together_Jitsi  -0.2362 0.290 267  -0.813  1.0000
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: INFORMATION_SHARING_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## comm 13.425  6.7124     2 221.31   8.251 0.0003499 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.069 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean    SE  df lower.CL upper.CL
##  Together_None    4.66 0.137 231     4.39     4.93
##  Together_Chat    4.76 0.187 228     4.39     5.13
##  Together_Jitsi   5.58 0.188 230     5.21     5.95
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Together_None - Together_Chat   -0.0984 0.232 229  -0.425  1.0000
##  Together_None - Together_Jitsi  -0.9138 0.232 230  -3.935  0.0003
##  Together_Chat - Together_Jitsi  -0.8154 0.265 229  -3.078  0.0070
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 3 tests 
## 
##  ============================================================ 
## RUNDENWEISE ANALYSE FÜR: SYNCHRONIZATION_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## comm 22.985  11.492     2 219.72  7.5026 0.0007049 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.064 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
## [1] "Estimated Marginal Means:"
##  comm           emmean     SE  df lower.CL upper.CL
##  Together_None    4.98 0.0999 230     4.78     5.18
##  Together_Chat    5.21 0.1370 226     4.95     5.48
##  Together_Jitsi   5.64 0.1380 231     5.37     5.91
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## [1] "\nPaarweise Vergleiche:"
##  contrast                       estimate    SE  df t.ratio p.value
##  Together_None - Together_Chat    -0.236 0.169 228  -1.392  0.4958
##  Together_None - Together_Jitsi   -0.658 0.170 230  -3.873  0.0004
##  Together_Chat - Together_Jitsi   -0.423 0.194 229  -2.183  0.0902
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 3 tests
# One ANOVA per team factor; skip factors whose analysis returned NULL.
team_results <- list()
for (team_var in team_factor_vars) {
  analysis <- analyze_team_variable(team_var, analysis_data_team)
  if (is.null(analysis)) next
  team_results[[team_var]] <- analysis
}
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: TEAM_COMPOSITION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)    
## comm          2  211.1  105.57   228.9 <2e-16 ***
## Residuals   230  106.1    0.46                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.666 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                     diff        lwr        upr     p adj
## Together_Chat-Together_None  -1.86501147 -2.1209028 -1.6091201 0.0000000
## Together_Jitsi-Together_None -1.94278925 -2.1986806 -1.6868979 0.0000000
## Together_Jitsi-Together_Chat -0.07777778 -0.3702514  0.2146959 0.8052639
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: TEAM_MOTIVATION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## comm          2  26.44  13.220   13.75 2.31e-06 ***
## Residuals   226 217.27   0.961                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.108 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                     diff        lwr        upr     p adj
## Together_Chat-Together_None  -0.71197757 -1.0838333 -0.3401219 0.0000300
## Together_Jitsi-Together_None -0.64531091 -1.0171666 -0.2734552 0.0001739
## Together_Jitsi-Together_Chat  0.06666667 -0.3556705  0.4890039 0.9264304
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: INTERDEPENDENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## comm          2    8.0   4.001   1.537  0.217
## Residuals   223  580.6   2.604               
## Partielle η² = 0.014 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: COMMON_GOAL_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## comm          2  22.96   11.48   19.81 1.15e-08 ***
## Residuals   230 133.29    0.58                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.147 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                   diff        lwr       upr     p adj
## Together_Chat-Together_None  0.5739921  0.2871316 0.8608527 0.0000122
## Together_Jitsi-Together_None 0.6739921  0.3871316 0.9608527 0.0000002
## Together_Jitsi-Together_Chat 0.1000000 -0.2278702 0.4278702 0.7522114
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: MEANS_COORDINATION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## comm          2   33.7  16.865   9.895 7.54e-05 ***
## Residuals   230  392.0   1.704                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.079 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                    diff         lwr       upr     p adj
## Together_Chat-Together_None  -0.4823009 -0.97424847 0.0096467 0.0560268
## Together_Jitsi-Together_None  0.5760324  0.08408486 1.0679800 0.0170128
## Together_Jitsi-Together_Chat  1.0583333  0.49605675 1.6206099 0.0000414
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: GROUP_SIZE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)    
## comm          2  419.2  209.61   381.5 <2e-16 ***
## Residuals   230  126.4    0.55                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.768 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                    diff        lwr        upr     p adj
## Together_Chat-Together_None  -2.6300393 -2.9093415 -2.3507372 0.0000000
## Together_Jitsi-Together_None -2.7355949 -3.0148970 -2.4562928 0.0000000
## Together_Jitsi-Together_Chat -0.1055556 -0.4247868  0.2136757 0.7156804
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: GROUP_DIVERSITY_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)    
## comm          2  323.9  161.96   168.1 <2e-16 ***
## Residuals   225  216.7    0.96                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.599 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                     diff        lwr        upr     p adj
## Together_Chat-Together_None  -2.40370370 -2.7765447 -2.0308627 0.0000000
## Together_Jitsi-Together_None -2.37037037 -2.7432114 -1.9975294 0.0000000
## Together_Jitsi-Together_Chat  0.03333333 -0.3894286  0.4560953 0.9811049
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: GROUP_SKILL_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## comm          2  25.24  12.622   14.99 7.71e-07 ***
## Residuals   226 190.29   0.842                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.117 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                    diff        lwr        upr     p adj
## Together_Chat-Together_None  -0.5739042 -0.9219124 -0.2258959 0.0003856
## Together_Jitsi-Together_None -0.7350153 -1.0830235 -0.3870071 0.0000037
## Together_Jitsi-Together_Chat -0.1611111 -0.5563634  0.2341412 0.6018516
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: COMMUNICATION_REQUIRED_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value  Pr(>F)   
## comm          2   43.8  21.924   7.046 0.00107 **
## Residuals   230  715.7   3.112                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.058 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                    diff        lwr       upr     p adj
## Together_Chat-Together_None  -0.2256637 -0.8903690 0.4390416 0.7028631
## Together_Jitsi-Together_None  0.8910029  0.2262977 1.5557082 0.0050372
## Together_Jitsi-Together_Chat  1.1166667  0.3569349 1.8763985 0.0018096
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: WORK_INDEPENDENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## comm          2   10.6   5.292   1.638  0.197
## Residuals   230  743.2   3.231               
## Partielle η² = 0.014 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: SOCIAL_PRESENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## comm          2  107.0   53.50   23.44 5.66e-10 ***
## Residuals   225  513.6    2.28                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Partielle η² = 0.172 
## 
## 🔍 SIGNIFIKANTER EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ comm")), data = var_data)
## 
## $comm
##                                   diff       lwr      upr     p adj
## Together_Chat-Together_None  0.8733333 0.2994055 1.447261 0.0011766
## Together_Jitsi-Together_None 1.6366667 1.0627389 2.210594 0.0000000
## Together_Jitsi-Together_Chat 0.7633333 0.1125604 1.414106 0.0167867
## 
## 
##  ============================================================ 
## TEAM-FAKTOREN ANALYSE FÜR: PERCEIVED_TASK_COMPLEXITY_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## comm          1   1.46   1.463   0.803  0.372
## Residuals   118 215.03   1.822               
## Partielle η² = 0.007
# ================================================================================
# STEP 4: SUMMARY OF RESULTS
# ================================================================================

# BUG FIX: print() showed the "\n" literally; cat() emits a real newline.
cat("\n=== SCHRITT 4: ANGEPASSTE ZUSAMMENFASSUNG ===\n")
## [1] "\n=== SCHRITT 4: ANGEPASSTE ZUSAMMENFASSUNG ==="
# ÄNDERUNG 6: Getrennte Zusammenfassungen

# Collect the variables whose omnibus tests reached significance.
# vapply() (instead of sapply()) guarantees a logical vector even when a
# result list is empty; isTRUE() guards against a missing `significant` field.
significant_round_vars <- names(round_results)[
  vapply(round_results, function(x) isTRUE(x$significant), logical(1))]
significant_team_vars <- names(team_results)[
  vapply(team_results, function(x) isTRUE(x$significant), logical(1))]

cat("📊 SIGNIFIKANTE RUNDENWEISE VARIABLEN:\n")
## 📊 SIGNIFIKANTE RUNDENWEISE VARIABLEN:
# Report every significant round-level variable with its effect size.
if (length(significant_round_vars) == 0) {
  cat("❌ Keine signifikanten rundenweisen Unterschiede\n")
} else {
  for (sig_var in significant_round_vars) {
    cat("✅", toupper(sig_var), "- η² =",
        round(round_results[[sig_var]]$eta_squared, 3), "\n")
  }
}
## ✅ FLOW_SCORE - η² = 0.253 
## ✅ STRESS_VALUE - η² = 0.073 
## ✅ INDIVIDUAL_MOTIVATION_VALUE - η² = 0.051 
## ✅ VALENCE_VALUE - η² = 0.036 
## ✅ AROUSAL_VALUE - η² = 0.078 
## ✅ INFORMATION_SHARING_VALUE - η² = 0.069 
## ✅ SYNCHRONIZATION_VALUE - η² = 0.064
cat("\n📊 SIGNIFIKANTE TEAM-FAKTOREN:\n")
## 
## 📊 SIGNIFIKANTE TEAM-FAKTOREN:
# Report every significant team factor with its effect size.
if (length(significant_team_vars) == 0) {
  cat("❌ Keine signifikanten Team-Faktoren Unterschiede\n")
} else {
  for (sig_var in significant_team_vars) {
    cat("✅", toupper(sig_var), "- η² =",
        round(team_results[[sig_var]]$eta_squared, 3), "\n")
  }
}
## ✅ TEAM_COMPOSITION_VALUE - η² = 0.666 
## ✅ TEAM_MOTIVATION_VALUE - η² = 0.108 
## ✅ COMMON_GOAL_VALUE - η² = 0.147 
## ✅ MEANS_COORDINATION_VALUE - η² = 0.079 
## ✅ GROUP_SIZE_VALUE - η² = 0.768 
## ✅ GROUP_DIVERSITY_VALUE - η² = 0.599 
## ✅ GROUP_SKILL_VALUE - η² = 0.117 
## ✅ COMMUNICATION_REQUIRED_VALUE - η² = 0.058 
## ✅ SOCIAL_PRESENCE_VALUE - η² = 0.172
# ================================================================================
# STEP 5: VISUALIZATIONS
# ================================================================================

# BUG FIX: print() showed the "\n" literally; cat() emits a real newline.
cat("\n=== SCHRITT 5: ANGEPASSTE VISUALISIERUNGEN ===\n")
## [1] "\n=== SCHRITT 5: ANGEPASSTE VISUALISIERUNGEN ==="
# ÄNDERUNG 7: Erweiterte Variablenlabels für alle Team-Faktoren

# Map raw column names to human-readable display labels for plots and tables.
# Returns a named character vector covering round-level variables, the
# original team factors, and the extended team factors.
create_variable_labels <- function() {
  round_level <- c(
    "flow_score" = "Flow Score",
    "stress_value" = "Stress",
    "individual_motivation_value" = "Individual Motivation",
    "valence_value" = "Valence",
    "arousal_value" = "Arousal",
    "information_sharing_value" = "Information Sharing",
    "synchronization_value" = "Synchronization"
  )
  team_core <- c(
    "team_composition_value" = "Team Composition",
    "team_motivation_value" = "Team Motivation",
    "interdependence_value" = "Interdependence",
    "common_goal_value" = "Common Goal",
    "means_coordination_value" = "Means for Coordination"
  )
  team_extended <- c(
    "group_size_value" = "Group Size",
    "group_diversity_value" = "Group Diversity",
    "group_skill_value" = "Group Skill",
    "communication_required_value" = "Communication Required",
    "work_independence_value" = "Work Independence",
    "social_presence_value" = "Social Presence",
    "perceived_task_complexity_value" = "Perceived Task Complexity"
  )
  c(round_level, team_core, team_extended)
}

variable_labels <- create_variable_labels()

# ÄNDERUNG 8: Kombinierte Visualisierung für alle Faktoren

# Build one long-format table combining per-person averages of the
# round-level variables with the single-shot team factor ratings.
prepare_combined_plot_data <- function() {
  # Round-level variables: average each person's repeated measurements first.
  rounds_long <- analysis_data_rounds %>%
    group_by(participant.code, comm) %>%
    summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop") %>%
    pivot_longer(cols = all_of(rundenweise_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Rundenweise")
  
  # Team factors: one rating per person, no aggregation needed.
  team_long <- analysis_data_team %>%
    select(participant.code, comm, all_of(team_factor_vars)) %>%
    pivot_longer(cols = all_of(team_factor_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Team-Faktoren")
  
  # Stack both sources and attach display labels for plotting.
  bind_rows(rounds_long, team_long) %>%
    mutate(
      variable_clean = variable_labels[variable],
      variable_clean = ifelse(is.na(variable_clean), variable, variable_clean),
      comm_clean = case_when(
        comm == "Alone" ~ "Alone",
        comm == "Together_None" ~ "Together\n(No Comm)",
        comm == "Together_Chat" ~ "Together\n(Chat)",
        comm == "Together_Jitsi" ~ "Together\n(Video/Audio)",
        TRUE ~ as.character(comm)
      )
    )
}

plot_data_combined <- prepare_combined_plot_data()

# Boxplot (with jittered raw points) of one variable across communication forms.
# Returns a ggplot object, or NULL when the variable is absent from `data`.
create_extended_boxplot <- function(var_name, data) {
  subset_data <- data %>% filter(variable == var_name)
  if (nrow(subset_data) == 0) {
    return(NULL)
  }
  
  var_label <- unique(subset_data$variable_clean)[1]
  var_type <- unique(subset_data$type)[1]
  
  # Distinct palettes separate round-level variables from team factors.
  palette_name <- if (var_type == "Rundenweise") "Set2" else "Set1"
  fill_colors <- RColorBrewer::brewer.pal(4, palette_name)
  
  ggplot(subset_data, aes(x = comm_clean, y = value, fill = comm_clean)) +
    geom_boxplot(alpha = 0.7, outlier.size = 1.5) +
    geom_jitter(width = 0.2, alpha = 0.4, size = 1) +
    scale_fill_manual(values = fill_colors) +
    labs(
      title = paste(var_label, paste0("(", var_type, ")")),
      x = "Communication Form",
      y = "Value"
    ) +
    theme_minimal() +
    theme(
      plot.title = element_text(size = 11, face = "bold", hjust = 0.5),
      axis.text.x = element_text(size = 9),
      axis.text.y = element_text(size = 9),
      legend.position = "none"
    )
}

# ÄNDERUNG 9: Erstelle Boxplots für alle erweiterten Variablen
print("Erstelle erweiterte Boxplots...")
## [1] "Erstelle erweiterte Boxplots..."
# Build one boxplot per variable that actually occurs in the plot data;
# intersect() keeps the original ordering of all_vars.
extended_boxplots <- list()
available_vars <- intersect(all_vars, unique(plot_data_combined$variable))
for (plot_var in available_vars) {
  cat("Erstelle Boxplot für:", plot_var, "\n")
  boxplot_obj <- create_extended_boxplot(plot_var, plot_data_combined)
  if (!is.null(boxplot_obj)) {
    extended_boxplots[[plot_var]] <- boxplot_obj
  }
}
## Erstelle Boxplot für: flow_score 
## Erstelle Boxplot für: stress_value 
## Erstelle Boxplot für: individual_motivation_value 
## Erstelle Boxplot für: valence_value 
## Erstelle Boxplot für: arousal_value 
## Erstelle Boxplot für: information_sharing_value 
## Erstelle Boxplot für: synchronization_value 
## Erstelle Boxplot für: team_composition_value 
## Erstelle Boxplot für: team_motivation_value 
## Erstelle Boxplot für: interdependence_value 
## Erstelle Boxplot für: common_goal_value 
## Erstelle Boxplot für: means_coordination_value 
## Erstelle Boxplot für: group_size_value 
## Erstelle Boxplot für: group_diversity_value 
## Erstelle Boxplot für: group_skill_value 
## Erstelle Boxplot für: communication_required_value 
## Erstelle Boxplot für: work_independence_value 
## Erstelle Boxplot für: social_presence_value 
## Erstelle Boxplot für: perceived_task_complexity_value
# Show all plots in one grid, sized to the number of plots (up to 6 columns).
if(length(extended_boxplots) > 0) {
  n_plots <- length(extended_boxplots)
  # FIX: renamed the locals `ncol`/`nrow`, which shadowed the base functions
  # ncol()/nrow() and invited subtle bugs in any later code in this chunk.
  n_col <- min(6, ceiling(sqrt(n_plots)))
  n_row <- ceiling(n_plots / n_col)
  
  grid.arrange(grobs = extended_boxplots, ncol = n_col, nrow = n_row,
               top = "Extended Team Factors Analysis: All Communication Forms")
}

print("\n=== SPEZIELLE TEAM-FAKTOREN VISUALISIERUNG ===")
## [1] "\n=== SPEZIELLE TEAM-FAKTOREN VISUALISIERUNG ==="
# ÄNDERUNG 10: Erstelle Grafik im Stil deiner Beispielgrafik

# Profile plot of mean team-factor scores per communication condition,
# modeled after the example figure (grouped bars with value labels).
#
# @param data Data frame with column `comm` and all `team_factor_vars` columns.
# @return A ggplot object.
create_team_factors_profile_plot <- function(data) {
  # Mean score per condition for every team factor, reshaped to long format.
  profile_data <- data %>%
    group_by(comm) %>%
    summarise(
      across(all_of(team_factor_vars), ~ mean(.x, na.rm = TRUE)),
      .groups = "drop"
    ) %>%
    pivot_longer(cols = all_of(team_factor_vars), names_to = "factor", values_to = "score") %>%
    mutate(
      factor_clean = variable_labels[factor],
      factor_clean = ifelse(is.na(factor_clean), factor, factor_clean),
      comm_clean = case_when(
        comm == "Alone" ~ "Alone",
        comm == "Together_None" ~ "Together (No Comm)",
        comm == "Together_Chat" ~ "Together (Chat)", 
        comm == "Together_Jitsi" ~ "Together (Video/Audio)",
        TRUE ~ as.character(comm)
      )
    )
  
  # Grouped bar chart: one bar per condition within each team factor.
  ggplot(profile_data, aes(x = factor_clean, y = score, fill = comm_clean)) +
    # BUG FIX: `size` is deprecated for line widths since ggplot2 3.4
    # (ggplot2 3.5.2 is loaded); the bar border width is now `linewidth`.
    geom_col(position = "dodge", alpha = 0.8, color = "black", linewidth = 0.3) +
    # BUG FIX: `fontweight` is not a geom_text parameter and was silently
    # ignored (see the "Ignoring unknown parameters" warning in the output);
    # the correct argument is `fontface`.
    geom_text(aes(label = round(score, 2)), 
              position = position_dodge(width = 0.9), 
              vjust = -0.5, size = 3, fontface = "bold") +
    scale_fill_brewer(type = "qual", palette = "Set2") +
    labs(
      title = "Team Factors Profile by Communication Condition",
      subtitle = "Mean scores across all team perception dimensions",
      x = "Team Factor",
      y = "Mean Score",
      fill = "Communication\nCondition"
    ) +
    theme_minimal() +
    theme(
      axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
      plot.title = element_text(size = 14, face = "bold"),
      legend.position = "bottom"
    )
}

# Render and print the team-factor profile plot only when team-level data
# exists; the plot object is kept in `team_profile_plot` for reuse.
if(nrow(analysis_data_team) > 0) {
  team_profile_plot <- create_team_factors_profile_plot(analysis_data_team)
  print(team_profile_plot)
}
## Warning in geom_text(aes(label = round(score, 2)), position =
## position_dodge(width = 0.9), : Ignoring unknown parameters: `fontweight`
## Warning: Removed 1 row containing missing values or values outside the scale range
## (`geom_col()`).
## Warning: Removed 1 row containing missing values or values outside the scale range
## (`geom_text()`).

# ================================================================================
# SCHRITT 6: KORRELATIONSANALYSE
# ================================================================================

# Merge both data sets for the correlation analysis:
# first collapse the round-wise mediators to one mean per person and
# condition, then attach the team factors for the same person/condition.
per_person_round_means <- analysis_data_rounds %>%
  group_by(participant.code, comm) %>%
  summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop")

combined_cor_data <- per_person_round_means %>%
  left_join(
    analysis_data_team %>% select(participant.code, comm, all_of(team_factor_vars)),
    by = c("participant.code", "comm")
  ) %>%
  select(all_of(all_vars)) %>%
  # Keep only variables with more than 10 non-missing observations
  select(where(~ sum(!is.na(.)) > 10))

# Correlation heatmap across all retained variables; requires at least two
# columns after filtering. corrplot() is assumed loaded earlier in the file.
if(ncol(combined_cor_data) >= 2) {
  cor_matrix <- cor(combined_cor_data, use = "pairwise.complete.obs")

  corrplot(
    cor_matrix,
    method = "color",
    type = "upper",
    order = "hclust",
    tl.cex = 0.8,
    tl.col = "black",
    title = "Korrelationen zwischen allen Einflussfaktoren",
    mar = c(0, 0, 2, 0)
  )
}

# ================================================================================
# SCHRITT 7: BOX-PLOTS
# ================================================================================

# Assemble the long-format data frame used by the corrected boxplots:
# per-person means of the round-wise mediators stacked with the team-factor
# scores, plus human-readable variable and condition labels.
# Reads the globals analysis_data_rounds, analysis_data_team,
# rundenweise_vars, team_factor_vars and variable_labels.
prepare_corrected_boxplot_data <- function() {
  
  # Round-wise mediators: collapse to one mean per participant and
  # condition so each person contributes a single point per variable
  round_plot_data <- analysis_data_rounds %>%
    group_by(participant.code, comm) %>%
    summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop") %>%
    pivot_longer(cols = all_of(rundenweise_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Rundenweise")
  
  # Team factors are already one measurement per participant and condition
  team_plot_data <- analysis_data_team %>%
    select(participant.code, comm, all_of(team_factor_vars)) %>%
    pivot_longer(cols = all_of(team_factor_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Team-Faktoren")
  
  # Stack both sources and attach display labels; unknown variables fall
  # back to their raw name
  bind_rows(round_plot_data, team_plot_data) %>%
    mutate(
      variable_clean = variable_labels[variable],
      variable_clean = ifelse(is.na(variable_clean), variable, variable_clean),
      comm_clean = case_when(
        comm == "Alone" ~ "Alone",
        comm == "Together_None" ~ "Together\n(No Comm)",
        comm == "Together_Chat" ~ "Together\n(Chat)",
        comm == "Together_Jitsi" ~ "Together\n(Video/Audio)",
        TRUE ~ as.character(comm)
      )
    )
}

# Materialise the corrected long-format plot data once
corrected_plot_data <- prepare_corrected_boxplot_data()

print(paste("Korrigierte Plot-Daten erstellt:", nrow(corrected_plot_data), "Datenpunkte"))
## [1] "Korrigierte Plot-Daten erstellt: 4477 Datenpunkte"
print(paste("Verfügbare Variablen:", length(unique(corrected_plot_data$variable))))
## [1] "Verfügbare Variablen: 19"
# List every available variable with its label, type and point count
available_vars <- unique(corrected_plot_data$variable)
cat("Verfügbare Variablen für Boxplots:\n")
## Verfügbare Variablen für Boxplots:
for(var in available_vars) {
  # First label/type seen for this variable (labels are constant per variable)
  var_label <- unique(corrected_plot_data$variable_clean[corrected_plot_data$variable == var])[1]
  var_type <- unique(corrected_plot_data$type[corrected_plot_data$variable == var])[1]
  n_points <- sum(corrected_plot_data$variable == var & !is.na(corrected_plot_data$value))
  cat("  -", var, ":", var_label, "(", var_type, ") - n =", n_points, "\n")
}
##   - flow_score : Flow Score ( Rundenweise ) - n = 270 
##   - stress_value : Stress ( Rundenweise ) - n = 271 
##   - individual_motivation_value : Individual Motivation ( Rundenweise ) - n = 270 
##   - valence_value : Valence ( Rundenweise ) - n = 271 
##   - arousal_value : Arousal ( Rundenweise ) - n = 271 
##   - information_sharing_value : Information Sharing ( Rundenweise ) - n = 233 
##   - synchronization_value : Synchronization ( Rundenweise ) - n = 233 
##   - team_composition_value : Team Composition ( Team-Faktoren ) - n = 233 
##   - team_motivation_value : Team Motivation ( Team-Faktoren ) - n = 229 
##   - interdependence_value : Interdependence ( Team-Faktoren ) - n = 226 
##   - common_goal_value : Common Goal ( Team-Faktoren ) - n = 233 
##   - means_coordination_value : Means for Coordination ( Team-Faktoren ) - n = 233 
##   - group_size_value : Group Size ( Team-Faktoren ) - n = 233 
##   - group_diversity_value : Group Diversity ( Team-Faktoren ) - n = 228 
##   - group_skill_value : Group Skill ( Team-Faktoren ) - n = 229 
##   - communication_required_value : Communication Required ( Team-Faktoren ) - n = 233 
##   - work_independence_value : Work Independence ( Team-Faktoren ) - n = 233 
##   - social_presence_value : Social Presence ( Team-Faktoren ) - n = 228 
##   - perceived_task_complexity_value : Perceived Task Complexity ( Team-Faktoren ) - n = 120
# One boxplot (with jittered raw points) for a single variable across the
# communication conditions.
#
# Args:
#   var_name: variable key to plot (matched against `data$variable`).
#   data: long-format plot data; defaults to the global corrected_plot_data.
# Returns: a ggplot object, or NULL when the variable has no rows.
create_corrected_boxplot <- function(var_name, data = corrected_plot_data) {
  var_data <- data %>% filter(variable == var_name)
  
  if(nrow(var_data) == 0) {
    cat("⚠️ Keine Daten für Variable:", var_name, "\n")
    return(NULL)
  }
  
  var_label <- unique(var_data$variable_clean)[1]
  var_type <- unique(var_data$type)[1]
  
  # Round-wise mediators and team factors get distinct Brewer palettes
  palette_name <- if(var_type == "Rundenweise") "Set2" else "Set1"
  colors <- RColorBrewer::brewer.pal(4, palette_name)
  
  ggplot(var_data, aes(x = comm_clean, y = value, fill = comm_clean)) +
    geom_boxplot(alpha = 0.7, outlier.size = 1.5) +
    geom_jitter(width = 0.2, alpha = 0.4, size = 1) +
    scale_fill_manual(values = colors) +
    labs(
      title = paste0(var_label, " (", var_type, ")"),
      x = "Communication Form",
      y = "Value"
    ) +
    theme_minimal() +
    theme(
      plot.title = element_text(size = 11, face = "bold", hjust = 0.5),
      axis.text.x = element_text(size = 9),
      axis.text.y = element_text(size = 9),
      legend.position = "none"
    )
}


# cat() renders the leading newline; print() would show "\n" literally.
cat("\nErstelle korrigierte Boxplots für alle Variablen...\n")
## [1] "\nErstelle korrigierte Boxplots für alle Variablen..."
# Build one corrected boxplot per variable, counting successes; variables
# missing from the prepared data are reported instead of plotted.
# The temporary result is named `bp`, not `plot`: assigning to `plot` masks
# the base graphics function plot() for the rest of the session.
corrected_boxplots <- list()
successful_plots <- 0

for(var in all_vars) {
  if(var %in% available_vars) {
    cat("Erstelle Boxplot für:", var, "\n")
    bp <- create_corrected_boxplot(var, corrected_plot_data)
    if(!is.null(bp)) {
      corrected_boxplots[[var]] <- bp
      successful_plots <- successful_plots + 1
    }
  } else {
    cat("⚠️ Variable nicht in Daten verfügbar:", var, "\n")
  }
}
## Erstelle Boxplot für: flow_score 
## Erstelle Boxplot für: stress_value 
## Erstelle Boxplot für: individual_motivation_value 
## Erstelle Boxplot für: valence_value 
## Erstelle Boxplot für: arousal_value 
## Erstelle Boxplot für: information_sharing_value 
## Erstelle Boxplot für: synchronization_value 
## Erstelle Boxplot für: team_composition_value 
## Erstelle Boxplot für: team_motivation_value 
## Erstelle Boxplot für: interdependence_value 
## Erstelle Boxplot für: common_goal_value 
## Erstelle Boxplot für: means_coordination_value 
## Erstelle Boxplot für: group_size_value 
## Erstelle Boxplot für: group_diversity_value 
## Erstelle Boxplot für: group_skill_value 
## Erstelle Boxplot für: communication_required_value 
## Erstelle Boxplot für: work_independence_value 
## Erstelle Boxplot für: social_presence_value 
## Erstelle Boxplot für: perceived_task_complexity_value
cat("Erfolgreich erstellte Boxplots:", successful_plots, "von", length(all_vars), "\n")
## Erfolgreich erstellte Boxplots: 19 von 19
# Display all corrected boxplots: one combined grid, then separate grids per
# variable type, then every plot individually.
if(length(corrected_boxplots) > 0) {
  
  # FIX: cat() instead of print() so "\n" becomes a real line break rather
  # than being echoed literally.
  cat("\n=== ZEIGE KORRIGIERTE BOXPLOTS ===\n")
  
  # Grid dimensions: 4x3 up to 12 plots, 6x3 up to 18, otherwise 6 columns
  n_plots <- length(corrected_boxplots)
  if(n_plots <= 12) {
    ncol <- 4
    nrow <- 3
  } else if(n_plots <= 18) {
    ncol <- 6
    nrow <- 3
  } else {
    ncol <- 6
    nrow <- ceiling(n_plots / 6)
  }
  
  print(paste("Zeige", n_plots, "Boxplots in einem", ncol, "x", nrow, "Grid"))
  
  # All plots in one grid
  grid.arrange(grobs = corrected_boxplots, ncol = ncol, nrow = nrow,
               top = "Corrected Communication Analysis: All Variables")
  
  # FIX: same print() -> cat() correction as above
  cat("\n=== SEPARATE GRIDS NACH TYP ===\n")
  
  # Round-wise mediator boxplots
  round_boxplots <- corrected_boxplots[names(corrected_boxplots) %in% rundenweise_vars]
  if(length(round_boxplots) > 0) {
    print(paste("Rundenweise Variablen:", length(round_boxplots), "Plots"))
    
    ncol_round <- min(4, length(round_boxplots))
    nrow_round <- ceiling(length(round_boxplots) / ncol_round)
    
    grid.arrange(grobs = round_boxplots, ncol = ncol_round, nrow = nrow_round,
                 top = "Rundenweise Mediatoren (Aggregiert pro Person)")
  }
  
  # Team-factor boxplots
  team_boxplots <- corrected_boxplots[names(corrected_boxplots) %in% team_factor_vars]
  if(length(team_boxplots) > 0) {
    print(paste("Team-Faktoren:", length(team_boxplots), "Plots"))
    
    ncol_team <- min(4, length(team_boxplots))
    nrow_team <- ceiling(length(team_boxplots) / ncol_team)
    
    grid.arrange(grobs = team_boxplots, ncol = ncol_team, nrow = nrow_team,
                 top = "Team-Faktoren (Post-Runden Messungen)")
  }
  
  cat("\n=== EINZELNE KORRIGIERTE BOXPLOTS ===\n")
  
  # Show each plot on its own, grouped by variable type
  cat("RUNDENWEISE VARIABLEN:\n")
  for(var in names(round_boxplots)) {
    cat("Zeige Boxplot für:", var, "\n")
    print(round_boxplots[[var]])
  }
  
  cat("\nTEAM-FAKTOREN:\n")
  for(var in names(team_boxplots)) {
    cat("Zeige Boxplot für:", var, "\n")
    print(team_boxplots[[var]])
  }
  
} else {
  cat("❌ Keine Boxplots erstellt - prüfe Datenaufbereitung!\n")
}
## [1] "\n=== ZEIGE KORRIGIERTE BOXPLOTS ==="
## [1] "Zeige 19 Boxplots in einem 6 x 4 Grid"

## [1] "\n=== SEPARATE GRIDS NACH TYP ==="
## [1] "Rundenweise Variablen: 7 Plots"

## [1] "Team-Faktoren: 12 Plots"

## 
## === EINZELNE KORRIGIERTE BOXPLOTS ===
## RUNDENWEISE VARIABLEN:
## Zeige Boxplot für: flow_score

## Zeige Boxplot für: stress_value

## Zeige Boxplot für: individual_motivation_value

## Zeige Boxplot für: valence_value

## Zeige Boxplot für: arousal_value

## Zeige Boxplot für: information_sharing_value

## Zeige Boxplot für: synchronization_value

## 
## TEAM-FAKTOREN:
## Zeige Boxplot für: team_composition_value

## Zeige Boxplot für: team_motivation_value

## Zeige Boxplot für: interdependence_value

## Zeige Boxplot für: common_goal_value

## Zeige Boxplot für: means_coordination_value

## Zeige Boxplot für: group_size_value

## Zeige Boxplot für: group_diversity_value

## Zeige Boxplot für: group_skill_value

## Zeige Boxplot für: communication_required_value

## Zeige Boxplot für: work_independence_value

## Zeige Boxplot für: social_presence_value

## Zeige Boxplot für: perceived_task_complexity_value

# cat() renders the newline; print() would show the "\n" escape literally.
cat("\n=== DIAGNOSTIK ===\n")
## [1] "\n=== DIAGNOSTIK ==="
# Prüfe Datenverfügbarkeit
cat("Datenverfügbarkeit pro Variablentyp:\n")
## Datenverfügbarkeit pro Variablentyp:
# Non-missing data points per variable, for one variable type at a time
count_points_by_type <- function(df, var_type) {
  df %>%
    filter(type == var_type) %>%
    group_by(variable) %>%
    summarise(n_points = sum(!is.na(value)), .groups = "drop")
}

round_availability <- count_points_by_type(corrected_plot_data, "Rundenweise")

team_availability <- count_points_by_type(corrected_plot_data, "Team-Faktoren")

cat("Rundenweise Variablen:\n")
## Rundenweise Variablen:
print(round_availability)
## # A tibble: 7 × 2
##   variable                    n_points
##   <chr>                          <int>
## 1 arousal_value                    271
## 2 flow_score                       270
## 3 individual_motivation_value      270
## 4 information_sharing_value        233
## 5 stress_value                     271
## 6 synchronization_value            233
## 7 valence_value                    271
cat("\nTeam-Faktoren:\n")
## 
## Team-Faktoren:
print(team_availability)
## # A tibble: 12 × 2
##    variable                        n_points
##    <chr>                              <int>
##  1 common_goal_value                    233
##  2 communication_required_value         233
##  3 group_diversity_value                228
##  4 group_size_value                     233
##  5 group_skill_value                    229
##  6 interdependence_value                226
##  7 means_coordination_value             233
##  8 perceived_task_complexity_value      120
##  9 social_presence_value                228
## 10 team_composition_value               233
## 11 team_motivation_value                229
## 12 work_independence_value              233
# Distinct participants per variable type and communication condition
comm_distribution <- corrected_plot_data %>%
  distinct(type, comm, participant.code) %>%
  count(type, comm, name = "n_participants")

cat("\nTeilnehmer pro Kommunikationsform:\n")
## 
## Teilnehmer pro Kommunikationsform:
print(comm_distribution)
## # A tibble: 7 × 3
##   type          comm           n_participants
##   <chr>         <fct>                   <int>
## 1 Rundenweise   Alone                      38
## 2 Rundenweise   Together_None             113
## 3 Rundenweise   Together_Chat              60
## 4 Rundenweise   Together_Jitsi             60
## 5 Team-Faktoren Together_None             113
## 6 Team-Faktoren Together_Chat              60
## 7 Team-Faktoren Together_Jitsi             60
# ================================================================================
# FINALE EMPFEHLUNGEN
# ================================================================================

# Section banner. FIX: cat() inserts a space between vector elements, so the
# original rep("=", 80) printed as "= = = …"; strrep() yields one solid rule,
# matching the paste(rep("=", 80), collapse = "") form used further below.
cat("\n", strrep("=", 80), "\n")
cat("EMPFEHLUNGEN FÜR WEITERE ANALYSEN\n")
cat(strrep("=", 80), "\n")
print("Analyse abgeschlossen! 🎉")
## [1] "Analyse abgeschlossen! 🎉"
# ================================================================================
# UNTERSUCHUNG DER EFFEKTGRÖSSEN-ANOMALIE BEI KOMMUNIKATIONSMEDIEN
# Warum haben nicht-signifikante Kommunikations-Variablen hohe Effektgrößen?
# ================================================================================

print("=== UNTERSUCHUNG DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE ===")
## [1] "=== UNTERSUCHUNG DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE ==="
# ================================================================================
# 1. DATENVERFÜGBARKEIT PRO VARIABLE UND KOMMUNIKATIONSFORM PRÜFEN
# ================================================================================

# cat() renders the leading newline; print() shows "\n" literally.
cat("\n1. DATENVERFÜGBARKEIT PRO VARIABLE UND KOMMUNIKATIONSFORM:\n")
## [1] "\n1. DATENVERFÜGBARKEIT PRO VARIABLE UND KOMMUNIKATIONSFORM:"
# Analysiere Datenverfügbarkeit für ALLE Variablen (rundenweise + Team-Faktoren)

# Availability (non-missing counts and percentages) per variable and
# communication condition, for both variable families. The shared
# summarisation step is factored out so both branches stay in sync.
summarise_availability <- function(long_df) {
  long_df %>%
    group_by(variable, comm) %>%
    summarise(
      n_available = sum(!is.na(value)),
      n_missing = sum(is.na(value)),
      pct_available = round(100 * sum(!is.na(value)) / n(), 1),
      .groups = "drop"
    )
}

# Round-wise mediators: aggregate to one mean per person first, so
# availability is counted per participant rather than per round
round_availability <- analysis_data_rounds %>%
  group_by(participant.code, comm) %>%
  summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop") %>%
  select(comm, all_of(rundenweise_vars)) %>%
  pivot_longer(cols = all_of(rundenweise_vars), names_to = "variable", values_to = "value") %>%
  summarise_availability() %>%
  mutate(type = "Rundenweise")

# Team factors: already one value per participant and condition
team_availability <- analysis_data_team %>%
  select(comm, all_of(team_factor_vars)) %>%
  pivot_longer(cols = all_of(team_factor_vars), names_to = "variable", values_to = "value") %>%
  summarise_availability() %>%
  mutate(type = "Team-Faktoren")

# Combine both availability tables into wide format (one column set per
# communication condition) and attach display labels.
all_availability <- bind_rows(round_availability, team_availability) %>%
  pivot_wider(names_from = comm, values_from = c(n_available, n_missing, pct_available)) %>%
  mutate(
    # With magrittr's %>%, `.` inside this nested call is the data frame
    # entering the mutate step, so rowSums runs over all n_available_* columns
    total_available = rowSums(select(., starts_with("n_available_")), na.rm = TRUE),
    variable_clean = variable_labels[variable],
    variable_clean = ifelse(is.na(variable_clean), variable, variable_clean)
  ) %>%
  arrange(type, total_available)

print("REPARIERTE Datenverfügbarkeit pro Variable und Kommunikationsform:")
## [1] "REPARIERTE Datenverfügbarkeit pro Variable und Kommunikationsform:"
# Names of the per-condition availability columns created by pivot_wider
available_cols <- names(all_availability)[grepl("n_available_", names(all_availability))]

print(all_availability %>% 
      select(variable_clean, type, all_of(available_cols), total_available))
## # A tibble: 19 × 7
##    variable_clean            type       n_available_Alone n_available_Together…¹
##    <chr>                     <chr>                  <int>                  <int>
##  1 Information Sharing       Rundenwei…                 0                    113
##  2 Synchronization           Rundenwei…                 0                    113
##  3 Flow Score                Rundenwei…                38                    112
##  4 Individual Motivation     Rundenwei…                38                    112
##  5 Arousal                   Rundenwei…                38                    113
##  6 Stress                    Rundenwei…                38                    113
##  7 Valence                   Rundenwei…                38                    113
##  8 Perceived Task Complexity Team-Fakt…                NA                      0
##  9 Interdependence           Team-Fakt…                NA                    106
## 10 Group Diversity           Team-Fakt…                NA                    108
## 11 Social Presence           Team-Fakt…                NA                    108
## 12 Group Skill               Team-Fakt…                NA                    109
## 13 Team Motivation           Team-Fakt…                NA                    109
## 14 Common Goal               Team-Fakt…                NA                    113
## 15 Communication Required    Team-Fakt…                NA                    113
## 16 Group Size                Team-Fakt…                NA                    113
## 17 Means for Coordination    Team-Fakt…                NA                    113
## 18 Team Composition          Team-Fakt…                NA                    113
## 19 Work Independence         Team-Fakt…                NA                    113
## # ℹ abbreviated name: ¹​n_available_Together_None
## # ℹ 3 more variables: n_available_Together_Chat <int>,
## #   n_available_Together_Jitsi <int>, total_available <dbl>
# ================================================================================
# 2. KORRIGIERTE KOMMUNIKATIONS-EFFEKTGRÖSSEN-BERECHNUNG
# ================================================================================

# Kombiniere Ergebnisse aus rundenweisen und Team-Faktoren Analysen
# Collect the ANOVA results of both analysis families into one named list.
#
# Each entry carries: analysis type label, significance flag, eta², the raw
# ANOVA object, and the name of the data set it came from. Reads the globals
# `round_results` and `team_results`. If a variable name appeared in both
# (it should not), the team-factor entry wins — same as the original loops.
# Returns: a named list, one element per analysed variable.
create_combined_effect_analysis <- function() {
  
  # Wrap one family of results with its shared metadata
  collect <- function(results, type, data_source) {
    lapply(results, function(res) {
      list(
        type = type,
        significant = res$significant,
        eta_squared = res$eta_squared,
        anova = res$anova,
        data_source = data_source
      )
    })
  }
  
  all_effect_results <- collect(round_results, "Rundenweise", "analysis_data_rounds")
  team_part <- collect(team_results, "Team-Faktoren", "analysis_data_team")
  # Name-based assignment preserves the overwrite semantics of the
  # original sequential loops
  all_effect_results[names(team_part)] <- team_part
  
  all_effect_results
}

# Materialise the combined result list (round-wise + team-factor ANOVAs)
combined_effect_results <- create_combined_effect_analysis()

# Per-condition sample sizes (non-missing rows) for every analysed variable.
#
# Returns a named list with one small tibble (comm, n) per variable, taken
# from the data set that variable was analysed in. Reads the globals
# `analysis_data_rounds`, `analysis_data_team`, `round_results`,
# `team_results`, `rundenweise_vars` and `team_factor_vars`.
analyze_sample_sizes <- function() {
  
  # Count non-missing observations per communication condition for one
  # variable; `.data[[var]]` is the tidy-eval idiom for a string column name
  count_per_comm <- function(data, var) {
    data %>%
      filter(!is.na(.data[[var]])) %>%
      group_by(comm) %>%
      summarise(n = n(), .groups = "drop")
  }
  
  sample_info <- list()
  
  # Round-wise variables come from the round-level data set; only variables
  # that actually have an ANOVA result are included (as before)
  for(var in intersect(rundenweise_vars, names(round_results))) {
    sample_info[[var]] <- count_per_comm(analysis_data_rounds, var)
  }
  
  # Team factors come from the per-person team data set
  for(var in intersect(team_factor_vars, names(team_results))) {
    sample_info[[var]] <- count_per_comm(analysis_data_team, var)
  }
  
  sample_info
}

# Materialise the per-variable sample sizes for the results table below
combined_sample_sizes <- analyze_sample_sizes()

# Build the repaired combined results table: one row per variable with
# significance, effect size, test statistics, sample sizes and a
# reliability flag. Reads the globals `all_vars`, `rundenweise_vars`,
# `combined_effect_results`, `combined_sample_sizes` and `variable_labels`.
create_repaired_results_table <- function() {
  
  results_df <- data.frame(
    Variable = all_vars,
    stringsAsFactors = FALSE
  )
  
  # Variable family: round-wise mediator vs. team factor
  results_df$Type <- ifelse(results_df$Variable %in% rundenweise_vars, "Rundenweise", "Team-Faktoren")
  
  # Initialise result columns (NA until filled from the ANOVA results)
  results_df$N_total <- 0
  results_df$N_groups <- 0
  results_df$Eta_squared <- NA_real_
  results_df$P_value <- NA_real_
  results_df$F_value <- NA_real_
  results_df$Signifikant <- FALSE
  results_df$Analysis_Type <- NA_character_
  
  # Fill in effect-size, p/F and sample-size information per variable
  for(var in all_vars) {
    idx <- which(results_df$Variable == var)
    
    if(var %in% names(combined_effect_results)) {
      result <- combined_effect_results[[var]]
      
      results_df$Signifikant[idx] <- result$significant
      results_df$Eta_squared[idx] <- round(result$eta_squared, 3)
      results_df$Analysis_Type[idx] <- ifelse(result$type == "Rundenweise", 
                                             "Mixed-Effects ANOVA", "Simple ANOVA")
      
      # Extract p and F values. NOTE(review): the two branches assume
      # different ANOVA object shapes (mixed-model table exposes `Pr(>F)`
      # directly; simple aov is wrapped in a one-element list) — confirm
      # against the code that builds round_results/team_results.
      if(!is.null(result$anova)) {
        tryCatch({
          if(result$type == "Rundenweise") {
            results_df$P_value[idx] <- round(result$anova$`Pr(>F)`[1], 4)
            results_df$F_value[idx] <- round(result$anova$`F value`[1], 3)
          } else {
            results_df$P_value[idx] <- round(result$anova[[1]]$`Pr(>F)`[1], 4)
            results_df$F_value[idx] <- round(result$anova[[1]]$`F value`[1], 3)
          }
        }, error = function(e) {
          cat("⚠️ Konnte P/F-Werte für", var, "nicht extrahieren\n")
        })
      }
    }
    
    # Sample sizes: total n and number of communication groups with data
    if(var %in% names(combined_sample_sizes)) {
      sample_data <- combined_sample_sizes[[var]]
      results_df$N_total[idx] <- sum(sample_data$n)
      results_df$N_groups[idx] <- nrow(sample_data)
    }
  }
  
  # Human-readable labels, falling back to the raw variable name
  results_df$Variable_Clean <- variable_labels[results_df$Variable]
  results_df$Variable_Clean <- ifelse(is.na(results_df$Variable_Clean), 
                                     results_df$Variable, results_df$Variable_Clean)
  
  # Reliability flag: enough data, a computed eta², and eta² below the
  # 0.9 ceiling used to screen out overestimated effect sizes
  results_df$Reliable <- results_df$N_total >= 20 & 
                         !is.na(results_df$Eta_squared) &
                         results_df$Eta_squared < 0.9
  
  # Sort: by family, significant first, largest effects first
  results_df <- results_df %>%
    arrange(Type, desc(Signifikant), desc(Eta_squared))
  
  return(results_df)
}

# Build the repaired results table used by all summaries below
repaired_results <- create_repaired_results_table()

print("REPARIERTE KOMBINIERTE ERGEBNISTABELLE:")
## [1] "REPARIERTE KOMBINIERTE ERGEBNISTABELLE:"
print(repaired_results %>% 
      select(Variable_Clean, Type, Signifikant, P_value, F_value, Eta_squared, N_total, Analysis_Type))
##               Variable_Clean          Type Signifikant P_value F_value
## 1                 Flow Score   Rundenweise        TRUE  0.0000  29.080
## 2                    Arousal   Rundenweise        TRUE  0.0001   7.444
## 3                     Stress   Rundenweise        TRUE  0.0002   6.949
## 4        Information Sharing   Rundenweise        TRUE  0.0003   8.251
## 5            Synchronization   Rundenweise        TRUE  0.0007   7.503
## 6      Individual Motivation   Rundenweise        TRUE  0.0038   4.579
## 7                    Valence   Rundenweise        TRUE  0.0235   3.212
## 8                 Group Size Team-Faktoren        TRUE  0.0000 381.524
## 9           Team Composition Team-Faktoren        TRUE  0.0000 228.925
## 10           Group Diversity Team-Faktoren        TRUE  0.0000 168.139
## 11           Social Presence Team-Faktoren        TRUE  0.0000  23.441
## 12               Common Goal Team-Faktoren        TRUE  0.0000  19.809
## 13               Group Skill Team-Faktoren        TRUE  0.0000  14.990
## 14           Team Motivation Team-Faktoren        TRUE  0.0000  13.751
## 15    Means for Coordination Team-Faktoren        TRUE  0.0001   9.895
## 16    Communication Required Team-Faktoren        TRUE  0.0011   7.046
## 17           Interdependence Team-Faktoren       FALSE  0.2173   1.537
## 18         Work Independence Team-Faktoren       FALSE  0.1967   1.638
## 19 Perceived Task Complexity Team-Faktoren       FALSE  0.3721   0.803
##    Eta_squared N_total       Analysis_Type
## 1        0.253    1038 Mixed-Effects ANOVA
## 2        0.078    1043 Mixed-Effects ANOVA
## 3        0.073    1044 Mixed-Effects ANOVA
## 4        0.069     888 Mixed-Effects ANOVA
## 5        0.064     891 Mixed-Effects ANOVA
## 6        0.051    1035 Mixed-Effects ANOVA
## 7        0.036    1045 Mixed-Effects ANOVA
## 8        0.768     233        Simple ANOVA
## 9        0.666     233        Simple ANOVA
## 10       0.599     228        Simple ANOVA
## 11       0.172     228        Simple ANOVA
## 12       0.147     233        Simple ANOVA
## 13       0.117     229        Simple ANOVA
## 14       0.108     229        Simple ANOVA
## 15       0.079     233        Simple ANOVA
## 16       0.058     233        Simple ANOVA
## 17       0.014     226        Simple ANOVA
## 18       0.014     233        Simple ANOVA
## 19       0.007     120        Simple ANOVA
# Headline counts for the summary output below
total_vars <- nrow(repaired_results)
significant_vars <- sum(repaired_results$Signifikant, na.rm = TRUE)
reliable_vars <- sum(repaired_results$Reliable, na.rm = TRUE)

cat("\n=== REPARIERTE ZUSAMMENFASSUNG ===\n")
## 
## === REPARIERTE ZUSAMMENFASSUNG ===
cat("📊 GESAMTÜBERSICHT:\n")
## 📊 GESAMTÜBERSICHT:
cat("- Gesamte Variablen:", total_vars, "\n")
## - Gesamte Variablen: 19
cat("- Signifikante Variablen:", significant_vars, "(", round(100*significant_vars/total_vars, 1), "%)\n")
## - Signifikante Variablen: 16 ( 84.2 %)
cat("- Zuverlässige Variablen:", reliable_vars, "(", round(100*reliable_vars/total_vars, 1), "%)\n")
## - Zuverlässige Variablen: 19 ( 100 %)
# Report variables flagged as unreliable (only those with a computed eta²)
is_problematic <- !repaired_results$Reliable & !is.na(repaired_results$Eta_squared)
problematic_vars <- repaired_results$Variable[is_problematic]
if(length(problematic_vars) > 0) {
  cat("\n⚠️ PROBLEMATISCHE VARIABLEN (zu wenig Daten oder überschätzte Effektgrößen):\n")
  for(var in problematic_vars) {
    res_row <- repaired_results[repaired_results$Variable == var, ]
    cat("  -", res_row$Variable_Clean, ": n=", res_row$N_total, ", η²=", res_row$Eta_squared, "\n")
  }
}

# ================================================================================
# 4. INTERPRETATION DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE
# ================================================================================

# ================================================================================
# KORREKTUR: ERSETZE FEHLENDE comm_corrected_results DURCH repaired_results
# ================================================================================

# Das Problem: comm_corrected_results existiert nicht
# Lösung: Verwende repaired_results (die korrekte Variable)

# Section banner (this later section already uses the collapsed rule form)
cat("\n", paste(rep("=", 80), collapse=""), "\n")
## 
##  ================================================================================
cat("INTERPRETATION DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE\n")
## INTERPRETATION DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE
cat(paste(rep("=", 80), collapse=""), "\n")
## ================================================================================
# CORRECTED: use repaired_results (comm_corrected_results never existed);
# split variables by the reliability flag computed in the results table
reliable_comm_vars <- repaired_results$Variable[repaired_results$Reliable & !is.na(repaired_results$Reliable)]
unreliable_comm_vars <- repaired_results$Variable[!repaired_results$Reliable & !is.na(repaired_results$Reliable)]

cat("🔍 ERKLÄRUNG DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE:\n\n")
## 🔍 ERKLÄRUNG DER KOMMUNIKATIONS-EFFEKTGRÖSSEN-ANOMALIE:
cat("Die hohen Effektgrößen (>0.8) bei nicht-signifikanten Kommunikations-Effekten entstehen durch:\n")
## Die hohen Effektgrößen (>0.8) bei nicht-signifikanten Kommunikations-Effekten entstehen durch:
cat("1. 📊 UNBALANCIERTE GRUPPEN: Manche Kommunikationsformen haben sehr wenige Teilnehmer\n")
## 1. 📊 UNBALANCIERTE GRUPPEN: Manche Kommunikationsformen haben sehr wenige Teilnehmer
cat("2. 🎯 FEHLENDE DATEN: Nicht alle Kommunikationsformen haben Daten für alle Variablen\n")
## 2. 🎯 FEHLENDE DATEN: Nicht alle Kommunikationsformen haben Daten für alle Variablen
cat("3. 🔋 NIEDRIGE POWER: Bei 4 Gruppen braucht man mehr Teilnehmer für ausreichende Power\n")
## 3. 🔋 NIEDRIGE POWER: Bei 4 Gruppen braucht man mehr Teilnehmer für ausreichende Power
cat("4. 📐 ZWISCHEN-GRUPPEN VARIANZ: Große Unterschiede zwischen wenigen Datenpunkten\n\n")
## 4. 📐 ZWISCHEN-GRUPPEN VARIANZ: Große Unterschiede zwischen wenigen Datenpunkten
# One summary line per reliable variable: n, group count, eta² and p
if(length(reliable_comm_vars) > 0) {
  cat("✅ ZUVERLÄSSIGE KOMMUNIKATIONS-ERGEBNISSE:\n")
  for(var in reliable_comm_vars) {
    res_row <- repaired_results[repaired_results$Variable == var, ]
    cat("  -", res_row$Variable_Clean, ": n=", res_row$N_total,
        ", Gruppen=", res_row$N_groups, ", η²=", res_row$Eta_squared,
        ", p=", res_row$P_value, "\n")
  }
  cat("\n")
}
## ✅ ZUVERLÄSSIGE KOMMUNIKATIONS-ERGEBNISSE:
##   - Flow Score : n= 1038 , Gruppen= 4 , η²= 0.253 , p= 0 
##   - Arousal : n= 1043 , Gruppen= 4 , η²= 0.078 , p= 1e-04 
##   - Stress : n= 1044 , Gruppen= 4 , η²= 0.073 , p= 2e-04 
##   - Information Sharing : n= 888 , Gruppen= 3 , η²= 0.069 , p= 3e-04 
##   - Synchronization : n= 891 , Gruppen= 3 , η²= 0.064 , p= 7e-04 
##   - Individual Motivation : n= 1035 , Gruppen= 4 , η²= 0.051 , p= 0.0038 
##   - Valence : n= 1045 , Gruppen= 4 , η²= 0.036 , p= 0.0235 
##   - Group Size : n= 233 , Gruppen= 3 , η²= 0.768 , p= 0 
##   - Team Composition : n= 233 , Gruppen= 3 , η²= 0.666 , p= 0 
##   - Group Diversity : n= 228 , Gruppen= 3 , η²= 0.599 , p= 0 
##   - Social Presence : n= 228 , Gruppen= 3 , η²= 0.172 , p= 0 
##   - Common Goal : n= 233 , Gruppen= 3 , η²= 0.147 , p= 0 
##   - Group Skill : n= 229 , Gruppen= 3 , η²= 0.117 , p= 0 
##   - Team Motivation : n= 229 , Gruppen= 3 , η²= 0.108 , p= 0 
##   - Means for Coordination : n= 233 , Gruppen= 3 , η²= 0.079 , p= 1e-04 
##   - Communication Required : n= 233 , Gruppen= 3 , η²= 0.058 , p= 0.0011 
##   - Interdependence : n= 226 , Gruppen= 3 , η²= 0.014 , p= 0.2173 
##   - Work Independence : n= 233 , Gruppen= 3 , η²= 0.014 , p= 0.1967 
##   - Perceived Task Complexity : n= 120 , Gruppen= 2 , η²= 0.007 , p= 0.3721
# Report each unreliable communication result together with the reason(s)
# it was flagged: small sample, missing groups, or an inflated effect size.
if (length(unreliable_comm_vars) > 0) {
  cat("⚠️ UNZUVERLÄSSIGE KOMMUNIKATIONS-ERGEBNISSE:\n")
  for (unreliable_var in unreliable_comm_vars) {
    res_row <- repaired_results[repaired_results$Variable == unreliable_var, ]
    # Collect the applicable problem labels; `if (FALSE)` yields NULL,
    # which c() silently drops.
    flags <- c(
      if (res_row$N_total < 40) "KLEINE STICHPROBE",
      if (res_row$N_groups < 3) "FEHLENDE GRUPPEN",
      if (!is.na(res_row$Eta_squared) && res_row$Eta_squared > 0.8) "ÜBERSCHÄTZTE EFFEKTGRÖSSE"
    )
    # Join with a leading "" so the printed string keeps the same leading
    # space the original incremental paste() chain produced.
    problem <- paste(c("", flags), collapse = " ")
    cat("  -", res_row$Variable_Clean, ": n=", res_row$N_total,
        ", η²=", res_row$Eta_squared, " →", problem, "\n")
  }
  cat("\n")
}

# Recommendations derived from the anomaly analysis above.
cat("💡 EMPFEHLUNGEN FÜR KOMMUNIKATIONS-ANALYSE:\n")
## 💡 EMPFEHLUNGEN FÜR KOMMUNIKATIONS-ANALYSE:
cat("1. 🎯 Fokussiere nur auf Variablen mit n≥40 und mindestens 3 Kommunikationsformen\n")
## 1. 🎯 Fokussiere nur auf Variablen mit n≥40 und mindestens 3 Kommunikationsformen
cat("2. 📊 Verwende Omega² statt η² (konservativer bei kleinen Stichproben)\n")
## 2. 📊 Verwende Omega² statt η² (konservativer bei kleinen Stichproben)
cat("3. 🔍 Prüfe Balancierung der Kommunikationsgruppen\n")
## 3. 🔍 Prüfe Balancierung der Kommunikationsgruppen
cat("4. 📈 Sammle mehr Daten für unterrepräsentierte Kommunikationsformen\n")
## 4. 📈 Sammle mehr Daten für unterrepräsentierte Kommunikationsformen
cat("5. 🎨 Erstelle nur Boxplots für zuverlässige Variablen\n")
## 5. 🎨 Erstelle nur Boxplots für zuverlässige Variablen
cat("6. 🔄 Erwäge Zusammenfassung ähnlicher Kommunikationsformen\n\n")
## 6. 🔄 Erwäge Zusammenfassung ähnlicher Kommunikationsformen
# Show problematic groupings: per-communication-form sample sizes, printed
# only for the variables flagged as unreliable above.
cat("📊 STICHPROBENVERTEILUNG PRO KOMMUNIKATIONSFORM:\n")
## 📊 STICHPROBENVERTEILUNG PRO KOMMUNIKATIONSFORM:
for(var in names(combined_sample_sizes)) {
  if(var %in% unreliable_comm_vars) {
    cat("⚠️", var, ":\n")
    print(combined_sample_sizes[[var]])
    cat("\n")
  }
}

print("Kommunikations-Effektgrößen-Untersuchung abgeschlossen! 🔍")
## [1] "Kommunikations-Effektgrößen-Untersuchung abgeschlossen! 🔍"

Structural differences between the task types

# ================================================================================
# ERWEITERTE STRUKTURELLE UNTERSCHIEDE: MATH VS HP TASK
# Analyse mit neuer Datenstruktur und allen Team-Faktoren (Chat und Jitsi)
# ================================================================================

library(dplyr)
library(tidyr)
library(ggplot2)
library(lme4)
library(lmerTest)
library(car)
library(emmeans)
library(effectsize)
library(gridExtra)
library(broom)
library(broom.mixed)
library(RColorBrewer)

# ================================================================================
# STEP 1: EXTENDED DATA PREPARATION FOR THE TASK COMPARISON
# ================================================================================

print("=== SCHRITT 1: ERWEITERTE DATENAUFBEREITUNG FÜR TASK-VERGLEICH ===")
## [1] "=== SCHRITT 1: ERWEITERTE DATENAUFBEREITUNG FÜR TASK-VERGLEICH ==="
# CHANGE 1: define separate variable lists (mirrors the communication analysis).

# Round-level outcome variables (one value per participant per round).
rundenweise_vars <- c(
  "flow_score",
  "stress_value",
  "individual_motivation_value",
  "valence_value",
  "arousal_value",
  "information_sharing_value",
  "synchronization_value"
)

# Team-factor variables (collected once, in the post questionnaire).
team_factor_vars <- c(
  "team_composition_value",
  "team_motivation_value",
  "interdependence_value",
  "common_goal_value",
  "means_coordination_value",
  # Extended team factors
  "group_size_value",
  "group_diversity_value",
  "group_skill_value",
  "communication_required_value",
  "work_independence_value",
  "social_presence_value",
  "perceived_task_complexity_value"
)

# Combined list for the overall analysis. Order matters downstream: the
# results table classifies entries by membership in rundenweise_vars.
all_vars <- c(rundenweise_vars, team_factor_vars)

# CHANGE 2: build the extended data sets for the task comparison.

# Round-level analyses: normal (non-"Post") rounds, both tasks, restricted to
# the two "together" communication conditions.
task_analysis_data_rounds <- integrated_data_full %>%
  filter(comm %in% c("Together_Chat", "Together_Jitsi"), round != "Post") %>%
  mutate(
    task = factor(task, levels = c("Math", "HP")),
    comm = factor(comm, levels = c("Together_Chat", "Together_Jitsi")),
    participant.code = factor(participant.code),
    round = as.numeric(round)
  ) %>%
  filter(rowSums(is.na(select(., all_of(rundenweise_vars)))) <= 3)  # keep rows with at most 3 of the 7 round-level variables missing

# CHANGE 3: team-factor analyses use the "Post" round only.
task_analysis_data_team <- integrated_data_full %>%
  filter(comm %in% c("Together_Chat", "Together_Jitsi"), round == "Post") %>%
  mutate(
    task = factor(task, levels = c("Math", "HP")),
    comm = factor(comm, levels = c("Together_Chat", "Together_Jitsi")),
    participant.code = factor(participant.code)
  ) %>%
  filter(rowSums(is.na(select(., all_of(team_factor_vars)))) <= 5)  # keep rows with at most 5 of the 12 team factors missing

# Report the sizes of the two analysis data sets.
print(paste("Task-Analysedaten (rundenweise):", nrow(task_analysis_data_rounds), "Beobachtungen"))
## [1] "Task-Analysedaten (rundenweise): 805 Beobachtungen"
print(paste("Task-Analysedaten (Team-Faktoren):", nrow(task_analysis_data_team), "Beobachtungen"))
## [1] "Task-Analysedaten (Team-Faktoren): 240 Beobachtungen"
# Sample-size overview: participants and observations per task x comm cell.
task_sample_overview_rounds <- task_analysis_data_rounds %>%
  group_by(task, comm) %>%
  summarise(
    n_participants = n_distinct(participant.code),
    n_observations = n(),
    .groups = "drop"
  )

task_sample_overview_team <- task_analysis_data_team %>%
  group_by(task, comm) %>%
  summarise(
    n_participants = n_distinct(participant.code),
    n_observations = n(),
    .groups = "drop"
  )

print("Stichprobengrößen (rundenweise):")
## [1] "Stichprobengrößen (rundenweise):"
print(task_sample_overview_rounds)
## # A tibble: 4 × 4
##   task  comm           n_participants n_observations
##   <fct> <fct>                   <int>          <int>
## 1 Math  Together_Chat              60            233
## 2 Math  Together_Jitsi             60            227
## 3 HP    Together_Chat              60            171
## 4 HP    Together_Jitsi             60            174
# NOTE(review): print() shows "\n" literally (see echo below); cat() would
# render a real newline.
print("\nStichprobengrößen (Team-Faktoren):")
## [1] "\nStichprobengrößen (Team-Faktoren):"
print(task_sample_overview_team)
## # A tibble: 4 × 4
##   task  comm           n_participants n_observations
##   <fct> <fct>                   <int>          <int>
## 1 Math  Together_Chat              60             60
## 2 Math  Together_Jitsi             60             60
## 3 HP    Together_Chat              60             60
## 4 HP    Together_Jitsi             60             60
# ================================================================================
# STEP 2: EXTENDED DESCRIPTIVE STATISTICS
# ================================================================================

print("\n=== SCHRITT 2: ERWEITERTE DESKRIPTIVE STATISTIKEN NACH TASK ===")
## [1] "\n=== SCHRITT 2: ERWEITERTE DESKRIPTIVE STATISTIKEN NACH TASK ==="
# CHANGE 4: separate descriptive statistics for the two variable groups.

# Round-level variables: first aggregate per person (mean over rounds), then
# summarise mean / sd / n per task so each participant counts once.
task_descriptive_rounds <- task_analysis_data_rounds %>%
  group_by(participant.code, task, comm) %>%
  summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop") %>%
  group_by(task) %>%
  summarise(
    across(all_of(rundenweise_vars), 
           list(
             mean = ~ mean(.x, na.rm = TRUE),
             sd = ~ sd(.x, na.rm = TRUE),
             n = ~ sum(!is.na(.x))
           ),
           .names = "{.col}_{.fn}"),
    .groups = "drop"
  )

# Team factors: one observation per participant already, so summarise directly.
task_descriptive_team <- task_analysis_data_team %>%
  group_by(task) %>%
  summarise(
    across(all_of(team_factor_vars), 
           list(
             mean = ~ mean(.x, na.rm = TRUE),
             sd = ~ sd(.x, na.rm = TRUE),
             n = ~ sum(!is.na(.x))
           ),
           .names = "{.col}_{.fn}"),
    .groups = "drop"
  )

print("Deskriptive Statistiken pro Task (rundenweise):")
## [1] "Deskriptive Statistiken pro Task (rundenweise):"
print(task_descriptive_rounds)
## # A tibble: 2 × 22
##   task  flow_score_mean flow_score_sd flow_score_n stress_value_mean
##   <fct>           <dbl>         <dbl>        <int>             <dbl>
## 1 Math             5.40         0.794          120              2.68
## 2 HP               5.33         0.940          120              2.45
## # ℹ 17 more variables: stress_value_sd <dbl>, stress_value_n <int>,
## #   individual_motivation_value_mean <dbl>,
## #   individual_motivation_value_sd <dbl>, individual_motivation_value_n <int>,
## #   valence_value_mean <dbl>, valence_value_sd <dbl>, valence_value_n <int>,
## #   arousal_value_mean <dbl>, arousal_value_sd <dbl>, arousal_value_n <int>,
## #   information_sharing_value_mean <dbl>, information_sharing_value_sd <dbl>,
## #   information_sharing_value_n <int>, synchronization_value_mean <dbl>, …
print("\nDeskriptive Statistiken pro Task (Team-Faktoren):")
## [1] "\nDeskriptive Statistiken pro Task (Team-Faktoren):"
print(task_descriptive_team)
## # A tibble: 2 × 37
##   task  team_composition_value_m…¹ team_composition_val…² team_composition_val…³
##   <fct>                      <dbl>                  <dbl>                  <int>
## 1 Math                        3.61                  0.524                    120
## 2 HP                          3.70                  0.385                    120
## # ℹ abbreviated names: ¹​team_composition_value_mean,
## #   ²​team_composition_value_sd, ³​team_composition_value_n
## # ℹ 33 more variables: team_motivation_value_mean <dbl>,
## #   team_motivation_value_sd <dbl>, team_motivation_value_n <int>,
## #   interdependence_value_mean <dbl>, interdependence_value_sd <dbl>,
## #   interdependence_value_n <int>, common_goal_value_mean <dbl>,
## #   common_goal_value_sd <dbl>, common_goal_value_n <int>, …
# ================================================================================
# STEP 3: EXTENDED ANOVA ANALYSES FOR THE TASK COMPARISON
# ================================================================================

print("\n=== SCHRITT 3: ERWEITERTE ANOVA-ANALYSEN (TASK-EFFEKTE) ===")
## [1] "\n=== SCHRITT 3: ERWEITERTE ANOVA-ANALYSEN (TASK-EFFEKTE) ==="
# CHANGE 5: separate ANOVA helpers for the task comparison.

# Mixed-effects task comparison for one round-level variable.
#
# Fits lmer(<var_name> ~ task + comm + (1 | participant.code)) on the rows
# where <var_name> is non-missing, prints the Type III ANOVA table, derives a
# partial eta-squared for the task effect from its F statistic, and — when the
# task effect is significant (p < .05) — prints Bonferroni-adjusted pairwise
# emmeans contrasts.
#
# Args:
#   var_name: column name of the dependent variable in `data` (string).
#   data: data frame with columns task, comm, participant.code and <var_name>.
#
# Returns:
#   list(model, anova, eta_squared, p_value, significant), extended with
#   emmeans and pairwise when the task effect is significant; NULL when there
#   are not enough data, the task term is absent, or the model fails.
analyze_task_round_variable <- function(var_name, data) {
  cat("\n", paste(rep("=", 60), collapse=""), "\n")
  cat("TASK-VERGLEICH (RUNDENWEISE) FÜR:", toupper(var_name), "\n")
  cat(paste(rep("=", 60), collapse=""), "\n")
  
  var_data <- data %>%
    filter(!is.na(!!sym(var_name))) %>%
    mutate(
      task = droplevels(task),
      comm = droplevels(comm)
    )
  
  task_counts <- var_data %>%
    group_by(task) %>%
    summarise(n = n(), .groups = "drop")
  
  # Guard clause: need both task levels and at least 3 observations per level.
  if(nrow(task_counts) < 2 || !all(task_counts$n >= 3)) {
    cat("⚠️ Nicht genügend Daten\n")
    return(NULL)
  }
  
  tryCatch({
    # Random intercept per participant accounts for repeated rounds.
    model <- lmer(as.formula(paste(var_name, "~ task + comm + (1|participant.code)")), 
                  data = var_data)
    
    anova_result <- anova(model)
    print(anova_result)
    
    # FIX: previously a missing "task" row made the function fall through and
    # return NULL silently; report this case explicitly instead.
    task_row <- which(rownames(anova_result) == "task")
    if(length(task_row) == 0) {
      cat("⚠️ Kein Task-Effekt im Modell gefunden\n")
      return(NULL)
    }
    
    f_stat <- anova_result$`F value`[task_row]
    df1 <- anova_result$NumDF[task_row]
    df2 <- anova_result$DenDF[task_row]
    p_value <- anova_result$`Pr(>F)`[task_row]
    # Partial eta^2 from the F statistic: F*df1 / (F*df1 + df2).
    eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
    
    cat("Task-Effekt - Partielle η² =", round(eta_squared, 3), "\n")
    
    # FIX: include p_value in the result so downstream code does not have to
    # re-extract it from the anova table (backward-compatible addition).
    result <- list(model = model, anova = anova_result,
                   eta_squared = eta_squared, p_value = p_value,
                   significant = FALSE)
    
    # FIX: guard against NA p-values (degenerate fits) before comparing.
    if(!is.na(p_value) && p_value < 0.05) {
      cat("\n🔍 SIGNIFIKANTER TASK-EFFEKT!\n")
      emm_task <- emmeans(model, "task")
      pairwise_task <- pairs(emm_task, adjust = "bonferroni")
      
      print("Task-Vergleich:")
      print(pairwise_task)
      
      result$emmeans <- emm_task
      result$pairwise <- pairwise_task
      result$significant <- TRUE
    }
    
    result
  }, error = function(e) {
    cat("❌ Fehler bei Mixed-Effects Modell:", e$message, "\n")
    NULL
  })
}

# Two-way ANOVA task comparison for one team-factor variable.
#
# Team factors are measured once per participant (Post round), so a plain
# aov(<var_name> ~ task + comm) without random effects is used. Prints the
# ANOVA summary, computes a partial eta-squared for the task effect, and —
# when the task effect is significant (p < .05) — prints Tukey HSD post-hoc
# comparisons for task.
#
# Args:
#   var_name: column name of the dependent variable in `data` (string).
#   data: data frame with columns task, comm and <var_name>.
#
# Returns:
#   list(model, anova, eta_squared, p_value, significant), extended with
#   posthoc when the task effect is significant; NULL when there are not
#   enough data, the table is degenerate, or the model fails.
analyze_task_team_variable <- function(var_name, data) {
  cat("\n", paste(rep("=", 60), collapse=""), "\n")
  cat("TASK-VERGLEICH (TEAM-FAKTOREN) FÜR:", toupper(var_name), "\n")
  cat(paste(rep("=", 60), collapse=""), "\n")
  
  var_data <- data %>%
    filter(!is.na(!!sym(var_name))) %>%
    mutate(
      task = droplevels(task),
      comm = droplevels(comm)
    )
  
  task_counts <- var_data %>%
    group_by(task) %>%
    summarise(n = n(), .groups = "drop")
  
  # Guard clause: need both task levels and at least 3 observations per level.
  if(nrow(task_counts) < 2 || !all(task_counts$n >= 3)) {
    cat("⚠️ Nicht genügend Daten\n")
    return(NULL)
  }
  
  tryCatch({
    model <- aov(as.formula(paste(var_name, "~ task + comm")), data = var_data)
    
    anova_result <- summary(model)
    print(anova_result)
    
    # FIX: degenerate tables previously fell through and returned NULL
    # silently in two places; make every exit explicit.
    aov_table <- anova_result[[1]]
    if(is.null(aov_table) || nrow(aov_table) == 0) {
      return(NULL)
    }
    
    # "task" is the first term of the formula, hence row 1 of the table;
    # the residual df is the last row.
    f_stat <- aov_table$`F value`[1]
    df1 <- aov_table$Df[1]
    df2 <- aov_table$Df[length(aov_table$Df)]
    p_value <- aov_table$`Pr(>F)`[1]
    
    if(is.na(f_stat) || is.na(df1) || is.na(df2)) {
      return(NULL)
    }
    
    # Partial eta^2 from the F statistic: F*df1 / (F*df1 + df2).
    eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
    cat("Task-Effekt - Partielle η² =", round(eta_squared, 3), "\n")
    
    # FIX: include p_value in the result (backward-compatible addition,
    # consistent with analyze_task_round_variable).
    result <- list(model = model, anova = anova_result,
                   eta_squared = eta_squared, p_value = p_value,
                   significant = FALSE)
    
    # FIX: guard against NA p-values before comparing.
    if(!is.na(p_value) && p_value < 0.05) {
      cat("\n🔍 SIGNIFIKANTER TASK-EFFEKT!\n")
      posthoc <- TukeyHSD(model, "task")
      print(posthoc)
      
      result$posthoc <- posthoc
      result$significant <- TRUE
    }
    
    result
  }, error = function(e) {
    cat("❌ Fehler bei ANOVA:", e$message, "\n")
    NULL
  })
}

# CHANGE 6: run the separate task analyses.

# Analyse each round-level variable in order; name the results by variable
# and drop the NULL entries (analyses that could not be run).
task_round_results <- Filter(
  Negate(is.null),
  setNames(
    lapply(rundenweise_vars,
           function(v) analyze_task_round_variable(v, task_analysis_data_rounds)),
    rundenweise_vars
  )
)
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: FLOW_SCORE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF  DenDF F value Pr(>F)
## task 0.77165 0.77165     1 685.01  1.2932 0.2559
## comm 0.52318 0.52318     1 116.71  0.8768 0.3510
## Task-Effekt - Partielle η² = 0.002 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: STRESS_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF  DenDF F value   Pr(>F)   
## task 10.9060 10.9060     1 685.54  7.9255 0.005014 **
## comm  5.6452  5.6452     1 116.92  4.1025 0.045098 * 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.011 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
## [1] "Task-Vergleich:"
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP    0.236 0.0838 687   2.815  0.0050
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: INDIVIDUAL_MOTIVATION_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value   Pr(>F)   
## task 9.1209  9.1209     1 683.23  9.4108 0.002242 **
## comm 0.8995  0.8995     1 114.82  0.9281 0.337386   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.014 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
## [1] "Task-Vergleich:"
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP   -0.216 0.0704 686  -3.068  0.0022
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: VALENCE_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## task 125.054 125.054     1 682.52 47.4445 1.287e-11 ***
## comm  10.275  10.275     1 112.98  3.8984   0.05077 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.065 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
## [1] "Task-Vergleich:"
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP   -0.799 0.116 687  -6.888  <.0001
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: AROUSAL_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## task 46.080  46.080     1 685.25 21.4556 4.334e-06 ***
## comm  1.385   1.385     1 117.75  0.6451    0.4235    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.03 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
## [1] "Task-Vergleich:"
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP    0.485 0.105 685   4.632  <.0001
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: INFORMATION_SHARING_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##      Sum Sq Mean Sq NumDF  DenDF F value    Pr(>F)    
## task 85.537  85.537     1 682.89  68.799 5.805e-16 ***
## comm 16.257  16.257     1 114.38  13.076 0.0004463 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.092 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
## [1] "Task-Vergleich:"
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP   -0.661 0.0797 686  -8.294  <.0001
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## TASK-VERGLEICH (RUNDENWEISE) FÜR: SYNCHRONIZATION_VALUE 
## ============================================================ 
## Type III Analysis of Variance Table with Satterthwaite's method
##       Sum Sq Mean Sq NumDF  DenDF F value   Pr(>F)   
## task  0.0348  0.0348     1 684.15  0.0196 0.888733   
## comm 14.0931 14.0931     1 114.04  7.9230 0.005751 **
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0
# Analyse each team factor in order; name the results by variable and drop
# the NULL entries (analyses that could not be run).
task_team_results <- Filter(
  Negate(is.null),
  setNames(
    lapply(team_factor_vars,
           function(v) analyze_task_team_variable(v, task_analysis_data_team)),
    team_factor_vars
  )
)
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: TEAM_COMPOSITION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## task          1   0.55  0.5457   2.577  0.110
## comm          1   0.16  0.1556   0.735  0.392
## Residuals   237  50.19  0.2118               
## Task-Effekt - Partielle η² = 0.011 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: TEAM_MOTIVATION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## task          1   0.78  0.7782   1.431  0.233
## comm          1   0.06  0.0560   0.103  0.748
## Residuals   237 128.85  0.5437               
## Task-Effekt - Partielle η² = 0.006 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: INTERDEPENDENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)  
## task          1    7.7   7.704   2.762 0.0979 .
## comm          1    0.5   0.504   0.181 0.6711  
## Residuals   237  661.2   2.790                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.012 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: COMMON_GOAL_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)  
## task          1   2.43  2.4334   3.479 0.0634 .
## comm          1   1.38  1.3751   1.966 0.1622  
## Residuals   237 165.78  0.6995                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.014 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: MEANS_COORDINATION_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## task          1   15.5   15.50   7.298   0.0074 ** 
## comm          1   44.2   44.20  20.806 8.15e-06 ***
## Residuals   237  503.5    2.12                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.03 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ task + comm")), data = var_data)
## 
## $task
##              diff       lwr       upr     p adj
## HP-Math 0.5083333 0.1376251 0.8790416 0.0074037
## 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: GROUP_SIZE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## task          1   0.07  0.0667   0.233  0.630
## comm          1   0.54  0.5352   1.870  0.173
## Residuals   237  67.83  0.2862               
## Task-Effekt - Partielle η² = 0.001 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: GROUP_DIVERSITY_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)  
## task          1   2.14  2.1407   3.301 0.0705 .
## comm          1   0.03  0.0296   0.046 0.8309  
## Residuals   237 153.71  0.6486                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.014 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: GROUP_SKILL_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## task          1   0.24  0.2449   0.496  0.482
## comm          1   0.39  0.3894   0.789  0.375
## Residuals   237 116.98  0.4936               
## Task-Effekt - Partielle η² = 0.002 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: COMMUNICATION_REQUIRED_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value   Pr(>F)    
## task          1  199.8   199.8   83.27  < 2e-16 ***
## comm          1   44.2    44.2   18.42 2.58e-05 ***
## Residuals   237  568.8     2.4                     
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.26 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ task + comm")), data = var_data)
## 
## $task
##          diff      lwr      upr p adj
## HP-Math 1.825 1.431011 2.218989     0
## 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: WORK_INDEPENDENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value  Pr(>F)    
## task          1  112.1  112.07  34.545 1.4e-08 ***
## comm          1   13.1   13.07   4.028  0.0459 *  
## Residuals   237  768.8    3.24                    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.127 
## 
## 🔍 SIGNIFIKANTER TASK-EFFEKT!
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = as.formula(paste(var_name, "~ task + comm")), data = var_data)
## 
## $task
##              diff       lwr        upr p adj
## HP-Math -1.366667 -1.824748 -0.9085854     0
## 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: SOCIAL_PRESENCE_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value  Pr(>F)   
## task          1    8.7   8.740   3.851 0.05087 . 
## comm          1   17.4  17.388   7.662 0.00608 **
## Residuals   237  537.8   2.269                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## Task-Effekt - Partielle η² = 0.016 
## 
##  ============================================================ 
## TASK-VERGLEICH (TEAM-FAKTOREN) FÜR: PERCEIVED_TASK_COMPLEXITY_VALUE 
## ============================================================ 
##              Df Sum Sq Mean Sq F value Pr(>F)
## task          1    0.1  0.1378   0.076  0.783
## comm          1    0.1  0.1378   0.076  0.783
## Residuals   237  430.0  1.8144               
## Task-Effekt - Partielle η² = 0
# ================================================================================
# STEP 4: EXTENDED SUMMARY OF THE TASK DIFFERENCES
# ================================================================================

print("\n=== SCHRITT 4: ERWEITERTE ZUSAMMENFASSUNG DER TASK-UNTERSCHIEDE ===")
## [1] "\n=== SCHRITT 4: ERWEITERTE ZUSAMMENFASSUNG DER TASK-UNTERSCHIEDE ==="
# Collect the names of variables with a significant task effect from each
# result list (the lists contain only non-NULL analysis results).
task_significant_round_vars <- names(task_round_results)[sapply(task_round_results, function(x) x$significant)]
task_significant_team_vars <- names(task_team_results)[sapply(task_team_results, function(x) x$significant)]

cat("📊 SIGNIFIKANTE TASK-UNTERSCHIEDE (RUNDENWEISE):\n")
## 📊 SIGNIFIKANTE TASK-UNTERSCHIEDE (RUNDENWEISE):
# One line per significant round-level variable with its partial eta^2.
if(length(task_significant_round_vars) > 0) {
  for(var in task_significant_round_vars) {
    eta_sq <- task_round_results[[var]]$eta_squared
    cat("✅", toupper(var), "- η² =", round(eta_sq, 3), "\n")
  }
} else {
  cat("❌ Keine signifikanten rundenweisen Task-Unterschiede\n")
}
## ✅ STRESS_VALUE - η² = 0.011 
## ✅ INDIVIDUAL_MOTIVATION_VALUE - η² = 0.014 
## ✅ VALENCE_VALUE - η² = 0.065 
## ✅ AROUSAL_VALUE - η² = 0.03 
## ✅ INFORMATION_SHARING_VALUE - η² = 0.092
cat("\n📊 SIGNIFIKANTE TASK-UNTERSCHIEDE (TEAM-FAKTOREN):\n")
## 
## 📊 SIGNIFIKANTE TASK-UNTERSCHIEDE (TEAM-FAKTOREN):
# One line per significant team factor with its partial eta^2.
if(length(task_significant_team_vars) > 0) {
  for(var in task_significant_team_vars) {
    eta_sq <- task_team_results[[var]]$eta_squared
    cat("✅", toupper(var), "- η² =", round(eta_sq, 3), "\n")
  }
} else {
  cat("❌ Keine signifikanten Team-Faktoren Task-Unterschiede\n")
}
## ✅ MEANS_COORDINATION_VALUE - η² = 0.03 
## ✅ COMMUNICATION_REQUIRED_VALUE - η² = 0.26 
## ✅ WORK_INDEPENDENCE_VALUE - η² = 0.127
# Build the extended results table for the task comparison.
#
# Merges the round-level (mixed-effects) and team-factor (aov) results into
# one data frame with significance flag, partial eta-squared, p-value and
# sample size per variable, labelled via the global `variable_labels` and the
# variable lists `all_vars` / `rundenweise_vars`.
#
# Args (FIX: parameterised with defaults so the table can be rebuilt from
# other result lists; the original zero-argument call still works):
#   round_results / team_results: named result lists from the analysis loops.
#   rounds_data / team_data: data sets used to count non-missing observations.
#
# Returns:
#   Data frame sorted by Type, then significance, then effect size (desc).
create_task_results_table <- function(round_results = task_round_results,
                                      team_results = task_team_results,
                                      rounds_data = task_analysis_data_rounds,
                                      team_data = task_analysis_data_team) {
  results_df <- data.frame(
    Variable = all_vars,
    Type = ifelse(all_vars %in% rundenweise_vars, "Rundenweise", "Team-Faktoren"),
    stringsAsFactors = FALSE
  )
  
  # Initialise result columns.
  results_df$Signifikant <- FALSE
  results_df$Eta_squared <- NA_real_
  results_df$P_value <- NA_real_
  results_df$N_total <- 0
  
  # Round-level results (lmerTest anova tables).
  for(var in names(round_results)) {
    idx <- which(results_df$Variable == var)
    result <- round_results[[var]]
    
    # FIX: isTRUE() and the NULL guard protect against partially filled
    # result lists (round() on NULL would error).
    results_df$Signifikant[idx] <- isTRUE(result$significant)
    if(!is.null(result$eta_squared)) {
      results_df$Eta_squared[idx] <- round(result$eta_squared, 3)
    }
    
    # Extract the p-value of the task term from the anova table.
    task_row <- which(rownames(result$anova) == "task")
    if(length(task_row) > 0) {
      results_df$P_value[idx] <- round(result$anova$`Pr(>F)`[task_row], 4)
    }
    
    # Number of rows with a non-missing value for this variable.
    results_df$N_total[idx] <- sum(!is.na(rounds_data[[var]]))
  }
  
  # Team-factor results (aov summaries; task is row 1 of the table).
  for(var in names(team_results)) {
    idx <- which(results_df$Variable == var)
    result <- team_results[[var]]
    
    results_df$Signifikant[idx] <- isTRUE(result$significant)
    if(!is.null(result$eta_squared)) {
      results_df$Eta_squared[idx] <- round(result$eta_squared, 3)
    }
    
    if(length(result$anova) > 0 && nrow(result$anova[[1]]) > 0) {
      results_df$P_value[idx] <- round(result$anova[[1]]$`Pr(>F)`[1], 4)
    }
    
    results_df$N_total[idx] <- sum(!is.na(team_data[[var]]))
  }
  
  # Readable labels; fall back to the raw name when no label exists.
  results_df$Variable_Clean <- variable_labels[results_df$Variable]
  results_df$Variable_Clean <- ifelse(is.na(results_df$Variable_Clean), 
                                     results_df$Variable, results_df$Variable_Clean)
  
  # Significant results first within each type, largest effects on top.
  results_df %>%
    arrange(Type, desc(Signifikant), desc(Eta_squared))
}

# Build the combined results table and print a compact overview.
task_results_extended <- create_task_results_table()

print("ERWEITERTE TASK-VERGLEICH ERGEBNISÜBERSICHT:")
## [1] "ERWEITERTE TASK-VERGLEICH ERGEBNISÜBERSICHT:"
print(task_results_extended %>% 
      select(Variable_Clean, Type, Signifikant, P_value, Eta_squared, N_total))
##               Variable_Clean          Type Signifikant P_value Eta_squared
## 1        Information Sharing   Rundenweise        TRUE  0.0000       0.092
## 2                    Valence   Rundenweise        TRUE  0.0000       0.065
## 3                    Arousal   Rundenweise        TRUE  0.0000       0.030
## 4      Individual Motivation   Rundenweise        TRUE  0.0022       0.014
## 5                     Stress   Rundenweise        TRUE  0.0050       0.011
## 6                 Flow Score   Rundenweise       FALSE  0.2559       0.002
## 7            Synchronization   Rundenweise       FALSE  0.8887       0.000
## 8     Communication Required Team-Faktoren        TRUE  0.0000       0.260
## 9          Work Independence Team-Faktoren        TRUE  0.0000       0.127
## 10    Means for Coordination Team-Faktoren        TRUE  0.0074       0.030
## 11           Social Presence Team-Faktoren       FALSE  0.0509       0.016
## 12               Common Goal Team-Faktoren       FALSE  0.0634       0.014
## 13           Group Diversity Team-Faktoren       FALSE  0.0705       0.014
## 14           Interdependence Team-Faktoren       FALSE  0.0979       0.012
## 15          Team Composition Team-Faktoren       FALSE  0.1098       0.011
## 16           Team Motivation Team-Faktoren       FALSE  0.2327       0.006
## 17               Group Skill Team-Faktoren       FALSE  0.4819       0.002
## 18                Group Size Team-Faktoren       FALSE  0.6298       0.001
## 19 Perceived Task Complexity Team-Faktoren       FALSE  0.7831       0.000
##    N_total
## 1      805
## 2      805
## 3      805
## 4      805
## 5      805
## 6      805
## 7      805
## 8      240
## 9      240
## 10     240
## 11     240
## 12     240
## 13     240
## 14     240
## 15     240
## 16     240
## 17     240
## 18     240
## 19     240
# ================================================================================
# STEP 5: EXTENDED BOXPLOTS FOR THE TASK COMPARISON
# ================================================================================

# NOTE(review): print() shows the "\n" escape literally (see output below);
# cat() would be needed to render an actual blank line before the header.
print("\n=== SCHRITT 5: ERWEITERTE BOXPLOTS FÜR TASK-VERGLEICH ===")
## [1] "\n=== SCHRITT 5: ERWEITERTE BOXPLOTS FÜR TASK-VERGLEICH ==="
# CHANGE 7: prepare the extended plot data
prepare_task_plot_data <- function() {
  # Assemble one long-format data frame for the task-comparison boxplots.
  #
  # Round-level variables are first averaged per participant (one value per
  # participant x task x comm), team factors are taken as-is; both are then
  # lengthened to (variable, value) pairs and stacked.
  #
  # Reads the globals task_analysis_data_rounds, task_analysis_data_team,
  # rundenweise_vars, team_factor_vars and variable_labels.
  # @return Tibble with participant.code, task, comm, variable, value, type
  #   and a human-readable variable_clean label.

  # Fall back to the raw variable name when no label is defined
  attach_label <- function(df) {
    df %>%
      mutate(
        variable_clean = variable_labels[variable],
        variable_clean = ifelse(is.na(variable_clean), variable, variable_clean)
      )
  }

  # Round-level variables: aggregate per participant, then lengthen
  per_person_rounds <- task_analysis_data_rounds %>%
    group_by(participant.code, task, comm) %>%
    summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop")

  rounds_long <- per_person_rounds %>%
    pivot_longer(cols = all_of(rundenweise_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Rundenweise")

  # Team factors: already one row per participant
  team_long <- task_analysis_data_team %>%
    select(participant.code, task, comm, all_of(team_factor_vars)) %>%
    pivot_longer(cols = all_of(team_factor_vars), names_to = "variable", values_to = "value") %>%
    filter(!is.na(value)) %>%
    mutate(type = "Team-Faktoren")

  bind_rows(rounds_long, team_long) %>%
    attach_label()
}

# Materialise the long-format plotting data once for reuse below
task_plot_data_extended <- prepare_task_plot_data()

# Erstelle erweiterte Task-Boxplots
create_extended_task_boxplot <- function(var_name, data = task_plot_data_extended) {
  # Boxplot plus jittered raw points comparing Math vs. HP for one variable.
  #
  # @param var_name Variable name as it appears in data$variable.
  # @param data Long-format plot data (see prepare_task_plot_data()).
  # @return A ggplot object, or NULL when the variable has no rows.
  subset_df <- data %>% filter(variable == var_name)

  if (nrow(subset_df) == 0) {
    return(NULL)
  }

  # Title: "<clean label> (<data type>)"
  plot_title <- paste(unique(subset_df$variable_clean)[1],
                      paste0("(", unique(subset_df$type)[1], ")"))

  # Distinct colors for the two tasks
  task_palette <- c("Math" = "#4A90E2", "HP" = "#E24A4A")

  ggplot(subset_df, aes(x = task, y = value, fill = task)) +
    geom_boxplot(alpha = 0.7, outlier.size = 1.5) +
    geom_jitter(width = 0.2, alpha = 0.4, size = 1) +
    scale_fill_manual(values = task_palette) +
    labs(title = plot_title, x = "Task Type", y = "Value") +
    theme_minimal() +
    theme(
      plot.title = element_text(size = 11, face = "bold", hjust = 0.5),
      axis.text.x = element_text(size = 11),
      axis.text.y = element_text(size = 10),
      legend.position = "none"
    )
}

# Build one extended task boxplot per variable actually present in the data
extended_task_boxplots <- list()
plottable_vars <- unique(task_plot_data_extended$variable)
for (current_var in all_vars) {
  if (!(current_var %in% plottable_vars)) {
    next  # variable has no rows in the plot data
  }
  cat("Erstelle erweiterten Task-Boxplot für:", current_var, "\n")
  p <- create_extended_task_boxplot(current_var, task_plot_data_extended)
  if (!is.null(p)) {
    extended_task_boxplots[[current_var]] <- p
  }
}
## Erstelle erweiterten Task-Boxplot für: flow_score 
## Erstelle erweiterten Task-Boxplot für: stress_value 
## Erstelle erweiterten Task-Boxplot für: individual_motivation_value 
## Erstelle erweiterten Task-Boxplot für: valence_value 
## Erstelle erweiterten Task-Boxplot für: arousal_value 
## Erstelle erweiterten Task-Boxplot für: information_sharing_value 
## Erstelle erweiterten Task-Boxplot für: synchronization_value 
## Erstelle erweiterten Task-Boxplot für: team_composition_value 
## Erstelle erweiterten Task-Boxplot für: team_motivation_value 
## Erstelle erweiterten Task-Boxplot für: interdependence_value 
## Erstelle erweiterten Task-Boxplot für: common_goal_value 
## Erstelle erweiterten Task-Boxplot für: means_coordination_value 
## Erstelle erweiterten Task-Boxplot für: group_size_value 
## Erstelle erweiterten Task-Boxplot für: group_diversity_value 
## Erstelle erweiterten Task-Boxplot für: group_skill_value 
## Erstelle erweiterten Task-Boxplot für: communication_required_value 
## Erstelle erweiterten Task-Boxplot für: work_independence_value 
## Erstelle erweiterten Task-Boxplot für: social_presence_value 
## Erstelle erweiterten Task-Boxplot für: perceived_task_complexity_value
# Arrange all task boxplots in one grid
# (grid.arrange comes from gridExtra, loaded earlier in the file)
if (length(extended_task_boxplots) > 0) {
  n_plots <- length(extended_task_boxplots)
  # BUGFIX/cleanup: renamed from ncol/nrow — the original local names
  # shadowed base::ncol/base::nrow
  n_col <- min(6, ceiling(sqrt(n_plots)))
  n_row <- ceiling(n_plots / n_col)
  
  grid.arrange(grobs = extended_task_boxplots, ncol = n_col, nrow = n_row,
               top = "Extended Task Comparison: Math vs Hidden Profile (All Variables)")
}

# ================================================================================
# STEP 6: TEAM-FACTOR PROFILE PLOT WITH BOXPLOTS (AS IN THE REFERENCE IMAGE)
# ================================================================================
print("\n=== SCHRITT 6: TEAM-FAKTOREN PROFIL-PLOT ===")
## [1] "\n=== SCHRITT 6: TEAM-FAKTOREN PROFIL-PLOT ==="
create_task_team_profile_plot <- function(data) {
  # Faceted boxplot profile of all team factors, Math vs. Hidden Profile task.
  #
  # @param data One row per participant with the `team_factor_vars` columns
  #   plus a `task` factor (levels "Math", "HP"); typically
  #   task_analysis_data_team.
  # @return A ggplot object: dodged boxplots per factor, faceted into three
  #   factor categories, with per-group mean values printed over the boxes.
  
  # Long format — no aggregation, every single response feeds the boxplots
  profile_data <- data %>%
    pivot_longer(cols = all_of(team_factor_vars), names_to = "factor", values_to = "score") %>%
    filter(!is.na(score)) %>%
    mutate(
      factor_clean = variable_labels[factor],
      factor_clean = ifelse(is.na(factor_clean), factor, factor_clean),
      # Group the factors into the three display categories
      category = case_when(
        factor %in% c("interdependence_value", "common_goal_value", "means_coordination_value") ~ "Flow Pre-Conditions\n(in Teams)",
        factor %in% c("group_size_value", "group_diversity_value", "group_skill_value", "team_composition_value") ~ "Team Composition",
        factor %in% c("communication_required_value", "work_independence_value", "social_presence_value", "perceived_task_complexity_value") ~ "Team Interaction\nExperience",
        TRUE ~ "Other"
      )
      # Cleanup: the original additionally piped factor_clean through a
      # case_when in which every branch mapped a label onto itself — a
      # no-op, removed here with no change in behavior.
    ) %>%
    filter(category != "Other")  # drop factors that were not categorised
  
  # Per factor x task means for the text labels over the boxes
  mean_data <- profile_data %>%
    group_by(factor_clean, task, category) %>%
    summarise(mean_score = mean(score, na.rm = TRUE), .groups = "drop")
  
  ggplot(profile_data, aes(x = factor_clean, y = score, fill = task)) +
    geom_boxplot(position = position_dodge(width = 0.6), 
                 alpha = 0.7, 
                 outlier.shape = 16,
                 outlier.size = 1.5,
                 width = 0.6) +
    # Mean labels; mean_data inherits fill = task from the plot aes, which
    # drives the dodge so each label sits over its own box
    geom_text(data = mean_data, 
              aes(x = factor_clean, y = mean_score, label = round(mean_score, 1)),
              position = position_dodge(width = 0.6), 
              vjust = 0.5, 
              size = 2.0, 
              fontface = "bold",
              color = "black") +
    facet_wrap(~ category, scales = "free_x", ncol = 3, labeller = label_wrap_gen(width = 20)) +
    # BUGFIX: labels are now named so they cannot silently pair with the
    # wrong fill level (unnamed labels depend on the factor level order)
    scale_fill_manual(values = c("Math" = "#2E86AB", "HP" = "#A23B72"),
                      labels = c("Math" = "Math Task", "HP" = "Hidden Profile Task")) +
    # NOTE(review): hard limits 1..7 silently drop any score outside that
    # range — fine for 7-point Likert responses
    scale_y_continuous(limits = c(1, 7), breaks = 1:7) +
    labs(
      title = "Perceptions of the group task",
      x = "Self-Reported Construct",
      y = "Response (7Pt-Likert)",
      fill = "Task Type",
      caption = "Figure 12: Perceptions of the task format. Crosshairs and numbers show mean averages."
    ) +
    theme_minimal() +
    theme(
      # Slanted x-axis labels to avoid overlap
      axis.text.x = element_text(angle = 45, hjust = 1, size = 7, lineheight = 0.9),
      axis.text.y = element_text(size = 8),
      axis.title.x = element_text(size = 9, face = "bold", margin = margin(t = 8)),
      axis.title.y = element_text(size = 9, face = "bold"),
      
      # Facet styling; `linewidth` replaces the `size` argument that
      # ggplot2 3.4.0 deprecated (see warnings in the knitted output)
      strip.text = element_text(size = 8, face = "bold", margin = margin(4,4,4,4)),
      strip.background = element_rect(fill = "gray90", color = "black", linewidth = 0.5),
      
      # Plot title and caption
      plot.title = element_text(size = 11, face = "bold", hjust = 0.5, margin = margin(b = 8)),
      plot.caption = element_text(size = 7, hjust = 0.5, face = "italic", margin = margin(t = 8)),
      
      # Legend with smaller font
      legend.position = "bottom",
      legend.title = element_text(size = 8, face = "bold"),
      legend.text = element_text(size = 7),
      legend.margin = margin(t = 10),
      
      # Grid lines
      panel.grid.minor = element_blank(),
      panel.grid.major.x = element_blank(),
      panel.grid.major.y = element_line(color = "gray90", linewidth = 0.3),
      
      # Overall layout
      plot.margin = margin(10, 10, 10, 10),
      panel.spacing = unit(1, "lines")  # more space between facets
    )
}

# Create the team-factor profile plot (only when team data is present)
if(nrow(task_analysis_data_team) > 0) {
  team_profile_plot <- create_task_team_profile_plot(task_analysis_data_team)
  
  # Side effect: saves the plot in high resolution to the working directory
  ggsave("team_profile_boxplot.png", team_profile_plot, 
         width = 14, height = 8, dpi = 300, bg = "white")
  
  print(team_profile_plot)
}
## Warning: The `size` argument of `element_line()` is deprecated as of ggplot2 3.4.0.
## ℹ Please use the `linewidth` argument instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## Warning: The `size` argument of `element_rect()` is deprecated as of ggplot2 3.4.0.
## ℹ Please use the `linewidth` argument instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.

# ================================================================================
# SCHRITT 7: FINALE EMPFEHLUNGEN FÜR ERWEITERTE TASK-UNTERSCHIEDE
# ================================================================================

# Banner for the final recommendations section (80-character rule line)
cat("\n", paste(rep("=", 80), collapse=""), "\n")
## 
##  ================================================================================
cat("EMPFEHLUNGEN FÜR ERWEITERTE TASK-UNTERSCHIEDE ANALYSE\n")
## EMPFEHLUNGEN FÜR ERWEITERTE TASK-UNTERSCHIEDE ANALYSE
cat(paste(rep("=", 80), collapse=""), "\n")
## ================================================================================
# Total significant task effects across both analysis levels;
# task_significant_round_vars / task_significant_team_vars are set earlier.
total_significant <- length(task_significant_round_vars) + length(task_significant_team_vars)

if(total_significant > 0) {
  cat("✅ Du hast", total_significant, "signifikante Task-Unterschiede gefunden!\n\n")
  cat("Interpretation:\n")
  cat("• Math und HP Tasks unterscheiden sich in wichtigen Aspekten\n")
  cat("• Diese Unterschiede zeigen sich sowohl in rundenweisen als auch Team-Faktoren\n")
  cat("• Task-spezifische Mediationen sind wahrscheinlich\n\n")
  cat("Nächste Schritte:\n")
  cat("1. 🎯 Fokussiere auf die", total_significant, "signifikanten Variablen für Mediationsanalysen\n")
  cat("2. 📊 Führe separate Analysen pro Task durch\n") 
  cat("3. 🔍 Prüfe Task × Communication Interaktionen\n")
  cat("4. 📈 Verwende Task als Moderator in deinen Modellen\n")
} else {
  cat("ℹ️ Keine signifikanten Task-Unterschiede in den erweiterten Variablen gefunden.\n\n")
  cat("Interpretation:\n")
  cat("• Math und HP Tasks wirken ähnlich auf diese Faktoren\n")
  cat("• Kommunikationseffekte sind vermutlich task-unabhängig\n")
  cat("• Focus kann auf Kommunikationsformen gelegt werden\n\n")
  cat("Empfehlungen:\n")
  cat("1. 🎯 Kombiniere beide Tasks für mehr statistische Power\n")
  cat("2. 📈 Fokussiere primär auf Kommunikationseffekte\n")
  cat("3. 🔍 Prüfe ob kleine Task-Unterschiede praktisch relevant sind\n")
}
## ✅ Du hast 8 signifikante Task-Unterschiede gefunden!
## 
## Interpretation:
## • Math und HP Tasks unterscheiden sich in wichtigen Aspekten
## • Diese Unterschiede zeigen sich sowohl in rundenweisen als auch Team-Faktoren
## • Task-spezifische Mediationen sind wahrscheinlich
## 
## Nächste Schritte:
## 1. 🎯 Fokussiere auf die 8 signifikanten Variablen für Mediationsanalysen
## 2. 📊 Führe separate Analysen pro Task durch
## 3. 🔍 Prüfe Task × Communication Interaktionen
## 4. 📈 Verwende Task als Moderator in deinen Modellen
# List which variables are available at each analysis level
cat("\n📊 VERFÜGBARE ERWEITERTE VARIABLEN:\n")
## 
## 📊 VERFÜGBARE ERWEITERTE VARIABLEN:
cat("Rundenweise (", length(rundenweise_vars), "):", paste(rundenweise_vars, collapse=", "), "\n")
## Rundenweise ( 7 ): flow_score, stress_value, individual_motivation_value, valence_value, arousal_value, information_sharing_value, synchronization_value
cat("Team-Faktoren (", length(team_factor_vars), "):", paste(team_factor_vars, collapse=", "), "\n")
## Team-Faktoren ( 12 ): team_composition_value, team_motivation_value, interdependence_value, common_goal_value, means_coordination_value, group_size_value, group_diversity_value, group_skill_value, communication_required_value, work_independence_value, social_presence_value, perceived_task_complexity_value
print("🎯 Erweiterte Task-Analyse abgeschlossen!")
## [1] "🎯 Erweiterte Task-Analyse abgeschlossen!"
# ================================================================================
# 1. CHECK DATA AVAILABILITY PER VARIABLE (CORRECTED)
# ================================================================================

print("\n1. DATENVERFÜGBARKEIT PRO VARIABLE (KORRIGIERT):")
## [1] "\n1. DATENVERFÜGBARKEIT PRO VARIABLE (KORRIGIERT):"
# Kombiniere rundenweise und Team-Daten für Verfügbarkeitsanalyse
prepare_combined_availability_data <- function() {
  # Stack round-level and team-level data for the availability analysis,
  # tagging each row with its origin ("rounds" / "team").
  #
  # Round-level variables are averaged per participant x task x comm so both
  # sources contribute one row per participant.
  # @return Tibble: participant.code, task, comm, the variable columns of the
  #   respective source, and data_source.

  # Round-level variables, aggregated per participant for consistency
  per_person_rounds <- task_analysis_data_rounds %>%
    group_by(participant.code, task, comm) %>%
    summarise(across(all_of(rundenweise_vars), ~ mean(.x, na.rm = TRUE)), .groups = "drop") %>%
    mutate(data_source = "rounds") %>%
    select(participant.code, task, comm, all_of(rundenweise_vars), data_source)

  # Team factors, one row per participant
  per_person_team <- task_analysis_data_team %>%
    select(participant.code, task, comm, all_of(team_factor_vars)) %>%
    mutate(data_source = "team") %>%
    select(participant.code, task, comm, all_of(team_factor_vars), data_source)

  bind_rows(per_person_rounds, per_person_team)
}

# Analysiere Datenverfügbarkeit für alle erweiterten Variablen
analyze_data_availability <- function() {
  # Summarise, per variable and task, how many observations are available.
  #
  # Reads the globals task_analysis_data_rounds / task_analysis_data_team,
  # rundenweise_vars / team_factor_vars and variable_labels.
  # @return Tibble with one row per variable: Math/HP availability counts,
  #   missing counts and availability percentages side by side, sorted by
  #   data type and total availability.

  # Helper: availability per variable x task for one data set.
  # (Extracted — the original duplicated this pipeline verbatim for both
  # data sets.)
  summarise_availability <- function(df, vars, label) {
    df %>%
      select(task, all_of(vars)) %>%
      pivot_longer(cols = all_of(vars), names_to = "variable", values_to = "value") %>%
      group_by(variable, task) %>%
      summarise(
        n_available = sum(!is.na(value)),
        n_missing = sum(is.na(value)),
        pct_available = round(100 * sum(!is.na(value)) / n(), 1),
        data_type = label,
        .groups = "drop"
      )
  }

  # Round-level availability uses ALL observations (not aggregated)
  round_availability <- summarise_availability(task_analysis_data_rounds,
                                               rundenweise_vars, "Rundenweise")
  team_availability <- summarise_availability(task_analysis_data_team,
                                              team_factor_vars, "Team-Faktoren")

  # One row per variable with Math and HP columns side by side.
  # NOTE(review): total_available assumes both task levels ("Math", "HP")
  # occur in the data; with a level missing, the corresponding
  # n_available_* column would not exist and this mutate would error.
  bind_rows(round_availability, team_availability) %>%
    pivot_wider(names_from = task, values_from = c(n_available, n_missing, pct_available)) %>%
    mutate(
      total_available = n_available_Math + n_available_HP,
      variable_clean = variable_labels[variable],
      variable_clean = ifelse(is.na(variable_clean), variable, variable_clean)
    ) %>%
    arrange(data_type, desc(total_available))
}

# Run the availability analysis and show the key columns
data_availability_extended <- analyze_data_availability()

print("Erweiterte Datenverfügbarkeit pro Variable:")
## [1] "Erweiterte Datenverfügbarkeit pro Variable:"
print(data_availability_extended %>% 
      select(variable_clean, data_type, n_available_Math, n_available_HP, 
             total_available, pct_available_Math, pct_available_HP))
## # A tibble: 19 × 7
##    variable_clean      data_type n_available_Math n_available_HP total_available
##    <chr>               <chr>                <int>          <int>           <int>
##  1 Arousal             Rundenwe…              460            345             805
##  2 Flow Score          Rundenwe…              460            345             805
##  3 Individual Motivat… Rundenwe…              460            345             805
##  4 Information Sharing Rundenwe…              460            345             805
##  5 Stress              Rundenwe…              460            345             805
##  6 Synchronization     Rundenwe…              460            345             805
##  7 Valence             Rundenwe…              460            345             805
##  8 Common Goal         Team-Fak…              120            120             240
##  9 Communication Requ… Team-Fak…              120            120             240
## 10 Group Diversity     Team-Fak…              120            120             240
## 11 Group Size          Team-Fak…              120            120             240
## 12 Group Skill         Team-Fak…              120            120             240
## 13 Interdependence     Team-Fak…              120            120             240
## 14 Means for Coordina… Team-Fak…              120            120             240
## 15 Perceived Task Com… Team-Fak…              120            120             240
## 16 Social Presence     Team-Fak…              120            120             240
## 17 Team Composition    Team-Fak…              120            120             240
## 18 Team Motivation     Team-Fak…              120            120             240
## 19 Work Independence   Team-Fak…              120            120             240
## # ℹ 2 more variables: pct_available_Math <dbl>, pct_available_HP <dbl>
# ================================================================================
# 2. KORRIGIERTE EFFEKTGRÖSSEN-BERECHNUNG FÜR ALLE VARIABLEN
# ================================================================================

# Section header for the corrected effect-size calculations
print("\n2. KORRIGIERTE EFFEKTGRÖSSEN-BERECHNUNG FÜR ALLE VARIABLEN:")
## [1] "\n2. KORRIGIERTE EFFEKTGRÖSSEN-BERECHNUNG FÜR ALLE VARIABLEN:"
# Funktion für Effektgrößenberechnung (rundenweise Variablen)
calculate_round_effect_sizes <- function(var, data = task_analysis_data_rounds) {
  # Task effect size for a single round-level variable.
  #
  # Fits a mixed-effects model value ~ task + comm + (1 | participant.code)
  # on all single observations, prints the F test for `task`, partial eta²,
  # Cohen's d and sample sizes, and returns them as a list.
  #
  # @param var  Column name (string) of the variable to analyse.
  # @param data Round-level data with columns task, comm, participant.code.
  # @return list(eta_squared, cohens_d, f_stat, p_value, total_n, data_type)
  #   or NULL when there is too little data, the model fails, or the `task`
  #   term is missing from the ANOVA table.
  
  cat("\n--- ANALYSE FÜR (RUNDENWEISE):", var, "---\n")
  
  # Use ALL single observations (as in the original analysis)
  var_data <- data %>%
    filter(!is.na(!!sym(var))) %>%
    mutate(
      task = droplevels(task),
      comm = droplevels(comm),
      value = !!sym(var)  # direct copy of the analysed column
    )
  
  # Check sample size per task
  task_counts <- var_data %>%
    group_by(task) %>%
    summarise(n = n(), .groups = "drop")
  
  cat("Stichprobengrößen:\n")
  print(task_counts)
  
  if(nrow(task_counts) >= 2 && all(task_counts$n >= 3)) {
    
    tryCatch({
      # Mixed-effects model; lmerTest's anova() supplies NumDF/DenDF/Pr(>F)
      model <- lmer(value ~ task + comm + (1|participant.code), data = var_data)
      
      anova_result <- anova(model)
      
      # Locate the task effect in the ANOVA table
      task_row <- which(rownames(anova_result) == "task")
      
      if(length(task_row) > 0) {
        f_stat <- anova_result$`F value`[task_row]
        df1 <- anova_result$NumDF[task_row] 
        df2 <- anova_result$DenDF[task_row]
        p_value <- anova_result$`Pr(>F)`[task_row]
        
        # Partial eta squared from F and its degrees of freedom
        eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
        
        # Cohen's d from the pooled standard deviation of the two tasks
        means_by_task <- var_data %>%
          group_by(task) %>%
          summarise(mean_val = mean(value, na.rm = TRUE),
                   sd_val = sd(value, na.rm = TRUE),
                   n = n(), .groups = "drop")
        
        if(nrow(means_by_task) == 2) {
          pooled_sd <- sqrt(((means_by_task$n[1] - 1) * means_by_task$sd_val[1]^2 + 
                            (means_by_task$n[2] - 1) * means_by_task$sd_val[2]^2) / 
                           (means_by_task$n[1] + means_by_task$n[2] - 2))
          
          cohens_d <- abs(means_by_task$mean_val[1] - means_by_task$mean_val[2]) / pooled_sd
        } else {
          cohens_d <- NA
        }
        
        cat("F-Statistik:", round(f_stat, 3), "\n")
        cat("p-Wert:", round(p_value, 4), "\n") 
        cat("Partielle η²:", round(eta_squared, 3), "\n")
        cat("Cohen's d:", round(cohens_d, 3), "\n")
        cat("Gesamt-n:", sum(task_counts$n), "\n")
        
        # BUGFIX: guard against an NA p-value — `if (NA < 0.05)` would abort
        # with "missing value where TRUE/FALSE needed"
        if(!is.na(p_value) && p_value < 0.05) {
          cat("✅ SIGNIFIKANT\n")
        } else {
          cat("❌ NICHT SIGNIFIKANT\n")
        }
        
        return(list(
          eta_squared = eta_squared,
          cohens_d = cohens_d,
          f_stat = f_stat,
          p_value = p_value,
          total_n = sum(task_counts$n),
          data_type = "Rundenweise"
        ))
      }
      
    }, error = function(e) {
      cat("❌ Fehler bei Modell:", e$message, "\n")
      return(NULL)
    })
  } else {
    cat("⚠️ ZU WENIG DATEN\n")
    return(NULL)
  }
}

# Funktion für Effektgrößenberechnung (Team-Faktoren)
calculate_team_effect_sizes <- function(var, data = task_analysis_data_team) {
  # Task effect size for a single team-factor variable.
  #
  # Fits a plain two-factor ANOVA var ~ task + comm (one observation per
  # participant, so no random effect is used), prints the F test for `task`,
  # partial eta², Cohen's d and sample sizes, and returns them as a list.
  #
  # @param var  Column name (string) of the team factor to analyse.
  # @param data Team-level data with columns task and comm.
  # @return list(eta_squared, cohens_d, f_stat, p_value, total_n, data_type)
  #   or NULL when there is too little data or the model fails.
  
  cat("\n--- ANALYSE FÜR (TEAM-FAKTOREN):", var, "---\n")
  
  var_data <- data %>%
    filter(!is.na(!!sym(var))) %>%
    mutate(
      task = droplevels(task),
      comm = droplevels(comm)
    )
  
  # Check sample size per task
  task_counts <- var_data %>%
    group_by(task) %>%
    summarise(n = n(), .groups = "drop")
  
  cat("Stichprobengrößen:\n")
  print(task_counts)
  
  if(nrow(task_counts) >= 2 && all(task_counts$n >= 3)) {
    
    tryCatch({
      # Simple ANOVA
      model <- aov(as.formula(paste(var, "~ task + comm")), data = var_data)
      
      anova_result <- summary(model)
      
      if(length(anova_result) > 0 && nrow(anova_result[[1]]) > 0) {
        # Assumes `task` is the first term of the formula, hence row 1;
        # the last row of the Df column holds the residual df
        f_stat <- anova_result[[1]]$`F value`[1]
        df1 <- anova_result[[1]]$Df[1]
        df2 <- anova_result[[1]]$Df[length(anova_result[[1]]$Df)]
        p_value <- anova_result[[1]]$`Pr(>F)`[1]
        
        if(!is.na(f_stat) && !is.na(df1) && !is.na(df2)) {
          # Partial eta squared from F and its degrees of freedom
          eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
          
          # Cohen's d from the pooled standard deviation of the two tasks
          means_by_task <- var_data %>%
            group_by(task) %>%
            summarise(mean_val = mean(!!sym(var), na.rm = TRUE),
                     sd_val = sd(!!sym(var), na.rm = TRUE),
                     n = n(), .groups = "drop")
          
          if(nrow(means_by_task) == 2) {
            pooled_sd <- sqrt(((means_by_task$n[1] - 1) * means_by_task$sd_val[1]^2 + 
                              (means_by_task$n[2] - 1) * means_by_task$sd_val[2]^2) / 
                             (means_by_task$n[1] + means_by_task$n[2] - 2))
            
            cohens_d <- abs(means_by_task$mean_val[1] - means_by_task$mean_val[2]) / pooled_sd
          } else {
            cohens_d <- NA
          }
          
          cat("F-Statistik:", round(f_stat, 3), "\n")
          cat("p-Wert:", round(p_value, 4), "\n") 
          cat("Partielle η²:", round(eta_squared, 3), "\n")
          cat("Cohen's d:", round(cohens_d, 3), "\n")
          cat("Gesamt-n:", sum(task_counts$n), "\n")
          
          # BUGFIX: guard against an NA p-value — `if (NA < 0.05)` would
          # abort with "missing value where TRUE/FALSE needed"
          if(!is.na(p_value) && p_value < 0.05) {
            cat("✅ SIGNIFIKANT\n")
          } else {
            cat("❌ NICHT SIGNIFIKANT\n")
          }
          
          return(list(
            eta_squared = eta_squared,
            cohens_d = cohens_d,
            f_stat = f_stat,
            p_value = p_value,
            total_n = sum(task_counts$n),
            data_type = "Team-Faktoren"
          ))
        }
      }
      
    }, error = function(e) {
      cat("❌ Fehler bei Modell:", e$message, "\n")
      return(NULL)
    })
  } else {
    cat("⚠️ ZU WENIG DATEN\n")
    return(NULL)
  }
}

# Compute effect sizes for all extended variables; results keyed by name
extended_effect_sizes <- list()

# Round-level variables (mixed-effects model per variable)
for(var in rundenweise_vars) {
  result <- calculate_round_effect_sizes(var)
  if(!is.null(result)) {
    extended_effect_sizes[[var]] <- result
  }
}
## 
## --- ANALYSE FÜR (RUNDENWEISE): flow_score ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 1.293 
## p-Wert: 0.2559 
## Partielle η²: 0.002 
## Cohen's d: 0.053 
## Gesamt-n: 805 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): stress_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 7.926 
## p-Wert: 0.005 
## Partielle η²: 0.011 
## Cohen's d: 0.151 
## Gesamt-n: 805 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): individual_motivation_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 9.411 
## p-Wert: 0.0022 
## Partielle η²: 0.014 
## Cohen's d: 0.178 
## Gesamt-n: 805 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): valence_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 47.444 
## p-Wert: 0 
## Partielle η²: 0.065 
## Cohen's d: 0.42 
## Gesamt-n: 805 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): arousal_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 21.456 
## p-Wert: 0 
## Partielle η²: 0.03 
## Cohen's d: 0.219 
## Gesamt-n: 805 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): information_sharing_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 68.799 
## p-Wert: 0 
## Partielle η²: 0.092 
## Cohen's d: 0.449 
## Gesamt-n: 805 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (RUNDENWEISE): synchronization_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    460
## 2 HP      345
## F-Statistik: 0.02 
## p-Wert: 0.8887 
## Partielle η²: 0 
## Cohen's d: 0.007 
## Gesamt-n: 805 
## ❌ NICHT SIGNIFIKANT
# Team factors (plain ANOVA per variable)
for(var in team_factor_vars) {
  result <- calculate_team_effect_sizes(var)
  if(!is.null(result)) {
    extended_effect_sizes[[var]] <- result
  }
}
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): team_composition_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 2.577 
## p-Wert: 0.1098 
## Partielle η²: 0.011 
## Cohen's d: 0.207 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): team_motivation_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 1.431 
## p-Wert: 0.2327 
## Partielle η²: 0.006 
## Cohen's d: 0.155 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): interdependence_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 2.762 
## p-Wert: 0.0979 
## Partielle η²: 0.012 
## Cohen's d: 0.215 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): common_goal_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 3.479 
## p-Wert: 0.0634 
## Partielle η²: 0.014 
## Cohen's d: 0.24 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): means_coordination_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 7.298 
## p-Wert: 0.0074 
## Partielle η²: 0.03 
## Cohen's d: 0.335 
## Gesamt-n: 240 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): group_size_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 0.233 
## p-Wert: 0.6298 
## Partielle η²: 0.001 
## Cohen's d: 0.062 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): group_diversity_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 3.301 
## p-Wert: 0.0705 
## Partielle η²: 0.014 
## Cohen's d: 0.235 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): group_skill_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 0.496 
## p-Wert: 0.4819 
## Partielle η²: 0.002 
## Cohen's d: 0.091 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): communication_required_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 83.272 
## p-Wert: 0 
## Partielle η²: 0.26 
## Cohen's d: 1.137 
## Gesamt-n: 240 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): work_independence_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 34.545 
## p-Wert: 0 
## Partielle η²: 0.127 
## Cohen's d: 0.754 
## Gesamt-n: 240 
## ✅ SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): social_presence_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 3.851 
## p-Wert: 0.0509 
## Partielle η²: 0.016 
## Cohen's d: 0.25 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
## 
## --- ANALYSE FÜR (TEAM-FAKTOREN): perceived_task_complexity_value ---
## Stichprobengrößen:
## # A tibble: 2 × 2
##   task      n
##   <fct> <int>
## 1 Math    120
## 2 HP      120
## F-Statistik: 0.076 
## p-Wert: 0.7831 
## Partielle η²: 0 
## Cohen's d: 0.036 
## Gesamt-n: 240 
## ❌ NICHT SIGNIFIKANT
# ================================================================================
# 3. ERWEITERTE ZUSAMMENFASSUNG UND KORREKTUR
# ================================================================================

# NOTE(review): print() renders "\n" literally (see echoed output below);
# cat() would be needed for a real leading newline.
print("\n3. ERWEITERTE ZUSAMMENFASSUNG UND KORREKTUR:")
## [1] "\n3. ERWEITERTE ZUSAMMENFASSUNG UND KORREKTUR:"
# Build the extended, corrected results table: one row per variable with
# rounded effect-size statistics plus significance/reliability flags, sorted
# by data type, reliability, significance and effect size (descending).
#
# Args (all default to the script-level globals, so the existing zero-arg
# call keeps working):
#   vars:         character vector of all variable names (table rows).
#   round_vars:   subset of `vars` measured per round ("Rundenweise");
#                 everything else is labelled "Team-Faktoren".
#   effect_sizes: named list; each element carries total_n, eta_squared,
#                 cohens_d, p_value and f_stat for one variable.
#   labels:       named character vector mapping variable names to display
#                 labels; unmapped names fall back to the raw variable name.
#
# Returns: a data.frame with columns Variable, Data_Type, N_total,
#   Eta_squared, Cohens_d, P_value, F_stat, Signifikant, Reliable and
#   Variable_Clean.
create_extended_results_table <- function(vars = all_vars,
                                          round_vars = rundenweise_vars,
                                          effect_sizes = extended_effect_sizes,
                                          labels = variable_labels) {

  extended_results <- data.frame(
    Variable = vars,
    stringsAsFactors = FALSE
  )

  # Classify each variable by measurement type
  extended_results$Data_Type <- ifelse(extended_results$Variable %in% round_vars,
                                       "Rundenweise", "Team-Faktoren")

  # Initialise result columns. N_total stays 0 (not NA) for variables without
  # results so the Reliable flag below is FALSE rather than NA for them.
  extended_results$N_total <- 0
  extended_results$Eta_squared <- NA_real_
  extended_results$Cohens_d <- NA_real_
  extended_results$P_value <- NA_real_
  extended_results$F_stat <- NA_real_

  # Fill in the computed statistics for every analysed variable
  for (var in names(effect_sizes)) {
    idx <- which(extended_results$Variable == var)
    if (length(idx) == 0) next  # effect-size entry not listed in `vars`: skip
    result <- effect_sizes[[var]]

    extended_results$N_total[idx] <- result$total_n
    extended_results$Eta_squared[idx] <- round(result$eta_squared, 3)
    extended_results$Cohens_d[idx] <- round(result$cohens_d, 3)
    extended_results$P_value[idx] <- round(result$p_value, 4)
    extended_results$F_stat[idx] <- round(result$f_stat, 3)
  }

  # Reliability indicators: enough observations and no degenerate eta-squared
  extended_results$Signifikant <- extended_results$P_value < 0.05 &
                                  !is.na(extended_results$P_value)
  extended_results$Reliable <- extended_results$N_total >= 20 &
                               (extended_results$Eta_squared < 0.9 |
                                is.na(extended_results$Eta_squared))

  # Human-readable labels, falling back to the raw variable name
  extended_results$Variable_Clean <- labels[extended_results$Variable]
  extended_results$Variable_Clean <- ifelse(is.na(extended_results$Variable_Clean),
                                            extended_results$Variable,
                                            extended_results$Variable_Clean)

  # Sort by type, then reliability, significance and effect size (descending).
  # Base order()/xtfrm() replaces dplyr::arrange(), so this function has no
  # tidyverse dependency of its own; na.last = TRUE (the default) matches
  # arrange()'s NA-last behaviour for desc(Eta_squared).
  ord <- order(extended_results$Data_Type,
               -xtfrm(extended_results$Reliable),
               -xtfrm(extended_results$Signifikant),
               -xtfrm(extended_results$Eta_squared))
  extended_results <- extended_results[ord, , drop = FALSE]
  rownames(extended_results) <- NULL  # arrange() also resets row names

  extended_results
}

# Assemble the corrected results table and print its key columns.
extended_corrected_results <- create_extended_results_table()

print("ERWEITERTE KORRIGIERTE ERGEBNISTABELLE:")
## [1] "ERWEITERTE KORRIGIERTE ERGEBNISTABELLE:"
display_cols <- c("Variable_Clean", "Data_Type", "N_total", "Signifikant",
                  "P_value", "Eta_squared", "Cohens_d", "Reliable")
print(extended_corrected_results %>% select(all_of(display_cols)))
##               Variable_Clean     Data_Type N_total Signifikant P_value
## 1        Information Sharing   Rundenweise     805        TRUE  0.0000
## 2                    Valence   Rundenweise     805        TRUE  0.0000
## 3                    Arousal   Rundenweise     805        TRUE  0.0000
## 4      Individual Motivation   Rundenweise     805        TRUE  0.0022
## 5                     Stress   Rundenweise     805        TRUE  0.0050
## 6                 Flow Score   Rundenweise     805       FALSE  0.2559
## 7            Synchronization   Rundenweise     805       FALSE  0.8887
## 8     Communication Required Team-Faktoren     240        TRUE  0.0000
## 9          Work Independence Team-Faktoren     240        TRUE  0.0000
## 10    Means for Coordination Team-Faktoren     240        TRUE  0.0074
## 11           Social Presence Team-Faktoren     240       FALSE  0.0509
## 12               Common Goal Team-Faktoren     240       FALSE  0.0634
## 13           Group Diversity Team-Faktoren     240       FALSE  0.0705
## 14           Interdependence Team-Faktoren     240       FALSE  0.0979
## 15          Team Composition Team-Faktoren     240       FALSE  0.1098
## 16           Team Motivation Team-Faktoren     240       FALSE  0.2327
## 17               Group Skill Team-Faktoren     240       FALSE  0.4819
## 18                Group Size Team-Faktoren     240       FALSE  0.6298
## 19 Perceived Task Complexity Team-Faktoren     240       FALSE  0.7831
##    Eta_squared Cohens_d Reliable
## 1        0.092    0.449     TRUE
## 2        0.065    0.420     TRUE
## 3        0.030    0.219     TRUE
## 4        0.014    0.178     TRUE
## 5        0.011    0.151     TRUE
## 6        0.002    0.053     TRUE
## 7        0.000    0.007     TRUE
## 8        0.260    1.137     TRUE
## 9        0.127    0.754     TRUE
## 10       0.030    0.335     TRUE
## 11       0.016    0.250     TRUE
## 12       0.014    0.240     TRUE
## 13       0.014    0.235     TRUE
## 14       0.012    0.215     TRUE
## 15       0.011    0.207     TRUE
## 16       0.006    0.155     TRUE
## 17       0.002    0.091     TRUE
## 18       0.001    0.062     TRUE
## 19       0.000    0.036     TRUE
# ================================================================================
# 4. ERWEITERTE INTERPRETATION UND EMPFEHLUNGEN
# ================================================================================

# Print a banner for the interpretation section
cat("\n", paste(rep("=", 80), collapse=""), "\n")
## 
##  ================================================================================
cat("ERWEITERTE INTERPRETATION DER EFFEKTGRÖSSEN\n")
## ERWEITERTE INTERPRETATION DER EFFEKTGRÖSSEN
cat(paste(rep("=", 80), collapse=""), "\n")
## ================================================================================
# Categorise the table rows: reliable variables per data type, significant
# variables (any type), and unreliable variables. with() evaluates the
# column expressions directly in the data frame.
reliable_round_vars <- with(extended_corrected_results,
  Variable[Data_Type == "Rundenweise" & Reliable & !is.na(Reliable)])

reliable_team_vars <- with(extended_corrected_results,
  Variable[Data_Type == "Team-Faktoren" & Reliable & !is.na(Reliable)])

significant_vars <- with(extended_corrected_results,
  Variable[Signifikant & !is.na(Signifikant)])

unreliable_vars <- with(extended_corrected_results,
  Variable[!Reliable & !is.na(Reliable)])

# Overview counts: how many variables of each type were analysed
cat("📊 ÜBERSICHT ALLER ERWEITERTEN VARIABLEN:\n")
## 📊 ÜBERSICHT ALLER ERWEITERTEN VARIABLEN:
cat("- Rundenweise Variablen:", length(rundenweise_vars), "\n")
## - Rundenweise Variablen: 7
cat("- Team-Faktoren:", length(team_factor_vars), "\n")
## - Team-Faktoren: 12
cat("- Gesamt analysiert:", length(extended_effect_sizes), "\n\n")
## - Gesamt analysiert: 19
# Report the reliable round-wise variables ("***" marks significant ones).
if (length(reliable_round_vars) > 0) {
  cat("✅ ZUVERLÄSSIGE RUNDENWEISE ERGEBNISSE:\n")
  for (v in reliable_round_vars) {
    entry <- extended_corrected_results[extended_corrected_results$Variable == v, ]
    marker <- ifelse(entry$Signifikant, "***", "")
    cat("  -", entry$Variable_Clean, ": η²=", entry$Eta_squared,
        ", d=", entry$Cohens_d, ", p=", entry$P_value, marker, "\n")
  }
  cat("\n")
}
## ✅ ZUVERLÄSSIGE RUNDENWEISE ERGEBNISSE:
##   - Information Sharing : η²= 0.092 , d= 0.449 , p= 0 *** 
##   - Valence : η²= 0.065 , d= 0.42 , p= 0 *** 
##   - Arousal : η²= 0.03 , d= 0.219 , p= 0 *** 
##   - Individual Motivation : η²= 0.014 , d= 0.178 , p= 0.0022 *** 
##   - Stress : η²= 0.011 , d= 0.151 , p= 0.005 *** 
##   - Flow Score : η²= 0.002 , d= 0.053 , p= 0.2559  
##   - Synchronization : η²= 0 , d= 0.007 , p= 0.8887
# Report the reliable team-factor variables ("***" marks significant ones).
if (length(reliable_team_vars) > 0) {
  cat("✅ ZUVERLÄSSIGE TEAM-FAKTOREN ERGEBNISSE:\n")
  for (v in reliable_team_vars) {
    entry <- extended_corrected_results[extended_corrected_results$Variable == v, ]
    marker <- ifelse(entry$Signifikant, "***", "")
    cat("  -", entry$Variable_Clean, ": η²=", entry$Eta_squared,
        ", d=", entry$Cohens_d, ", p=", entry$P_value, marker, "\n")
  }
  cat("\n")
}
## ✅ ZUVERLÄSSIGE TEAM-FAKTOREN ERGEBNISSE:
##   - Communication Required : η²= 0.26 , d= 1.137 , p= 0 *** 
##   - Work Independence : η²= 0.127 , d= 0.754 , p= 0 *** 
##   - Means for Coordination : η²= 0.03 , d= 0.335 , p= 0.0074 *** 
##   - Social Presence : η²= 0.016 , d= 0.25 , p= 0.0509  
##   - Common Goal : η²= 0.014 , d= 0.24 , p= 0.0634  
##   - Group Diversity : η²= 0.014 , d= 0.235 , p= 0.0705  
##   - Interdependence : η²= 0.012 , d= 0.215 , p= 0.0979  
##   - Team Composition : η²= 0.011 , d= 0.207 , p= 0.1098  
##   - Team Motivation : η²= 0.006 , d= 0.155 , p= 0.2327  
##   - Group Skill : η²= 0.002 , d= 0.091 , p= 0.4819  
##   - Group Size : η²= 0.001 , d= 0.062 , p= 0.6298  
##   - Perceived Task Complexity : η²= 0 , d= 0.036 , p= 0.7831
# List every significant task difference across both data types.
if (length(significant_vars) > 0) {
  cat("🎯 SIGNIFIKANTE TASK-UNTERSCHIEDE (alle Typen):\n")
  for (v in significant_vars) {
    entry <- extended_corrected_results[extended_corrected_results$Variable == v, ]
    cat("  -", entry$Variable_Clean, "(", entry$Data_Type, "): p=", entry$P_value, "\n")
  }
  cat("\n")
}
## 🎯 SIGNIFIKANTE TASK-UNTERSCHIEDE (alle Typen):
##   - Information Sharing ( Rundenweise ): p= 0 
##   - Valence ( Rundenweise ): p= 0 
##   - Arousal ( Rundenweise ): p= 0 
##   - Individual Motivation ( Rundenweise ): p= 0.0022 
##   - Stress ( Rundenweise ): p= 0.005 
##   - Communication Required ( Team-Faktoren ): p= 0 
##   - Work Independence ( Team-Faktoren ): p= 0 
##   - Means for Coordination ( Team-Faktoren ): p= 0.0074
# Warn about variables whose results rest on too little data or extreme effects.
if (length(unreliable_vars) > 0) {
  cat("⚠️ UNZUVERLÄSSIGE ERGEBNISSE (zu wenig Daten oder extreme Effektgrößen):\n")
  for (v in unreliable_vars) {
    entry <- extended_corrected_results[extended_corrected_results$Variable == v, ]
    cat("  -", entry$Variable_Clean, ": n=", entry$N_total, ", η²=", entry$Eta_squared, "\n")
  }
  cat("\n")
}

# Final recommendations derived from the category counts above
cat("💡 AKTUALISIERTE EMPFEHLUNGEN:\n")
## 💡 AKTUALISIERTE EMPFEHLUNGEN:
cat("1. 🎯 Fokussiere auf", length(c(reliable_round_vars, reliable_team_vars)), "zuverlässige Variablen\n")
## 1. 🎯 Fokussiere auf 19 zuverlässige Variablen
cat("2. 📈 Verwende", length(significant_vars), "signifikante Variablen für Mediationsanalysen\n")
## 2. 📈 Verwende 8 signifikante Variablen für Mediationsanalysen
cat("3. 🔍 Sammle mehr Daten für", length(unreliable_vars), "unzuverlässige Variablen\n")
## 3. 🔍 Sammle mehr Daten für 0 unzuverlässige Variablen
cat("4. 📊 Getrennte Analysen für rundenweise vs. Team-Faktoren durchführen\n")
## 4. 📊 Getrennte Analysen für rundenweise vs. Team-Faktoren durchführen
cat("5. 🎨 Visualisierungen nur für zuverlässige Ergebnisse erstellen\n\n")
## 5. 🎨 Visualisierungen nur für zuverlässige Ergebnisse erstellen
print("Erweiterte Effektgrößen-Analyse abgeschlossen! 🔍")
## [1] "Erweiterte Effektgrößen-Analyse abgeschlossen! 🔍"

Mixed Design Analysis - HP/Video vs. HP/Chat vs. Math/Video vs. Math/Chat

# ================================================================================
# 4-TREATMENT MIXED-DESIGN ANALYSE: HP-Video, HP-Chat, Math-Video, Math-Chat
# Mixed Design: Communication (Between-Subjects) × Task (Within-Subjects)
# Nur Team-Faktoren aus dem zweiten Experiment
# ================================================================================

library(dplyr)
library(tidyr)
library(ggplot2)
library(lme4)
library(lmerTest)
library(car)
library(emmeans)
library(effectsize)
library(gridExtra)
library(broom)
library(broom.mixed)
library(RColorBrewer)

# ================================================================================
# SCHRITT 1: DATENAUFBEREITUNG FÜR MIXED-DESIGN ANALYSE
# ================================================================================

print("=== SCHRITT 1: MIXED-DESIGN DATENAUFBEREITUNG (NUR TEAM-FAKTOREN) ===")
## [1] "=== SCHRITT 1: MIXED-DESIGN DATENAUFBEREITUNG (NUR TEAM-FAKTOREN) ==="
# Team-factor variables only (no round-wise variables) — the post-round
# questionnaire items analysed in this section
team_factor_vars <- c(
  "team_composition_value", "team_motivation_value", "interdependence_value", 
  "common_goal_value", "means_coordination_value",
  "group_size_value", "group_diversity_value", "group_skill_value",
  "communication_required_value", "work_independence_value", "social_presence_value", "perceived_task_complexity_value"
)

# Display labels used in plots and printed summaries
variable_labels <- c(
  "team_composition_value" = "Team Composition",
  "team_motivation_value" = "Team Motivation",
  "interdependence_value" = "Mutual Dependence",
  "common_goal_value" = "Clear Goal Process",
  "means_coordination_value" = "Coordination Possible",
  "group_size_value" = "Group Size Just Right",
  "group_diversity_value" = "Adequate Perspective Diversity",
  "group_skill_value" = "Complementary Skills",
  "communication_required_value" = "Communication Required",
  "work_independence_value" = "Work Independence",
  "social_presence_value" = "Social Presence",
  "perceived_task_complexity_value" = "Perceived Task Complexity"
)

# DATA FILTERING: second experiment only (team factors, post-round).
mixed_design_data <- integrated_data_full %>%
  filter(
    !is.na(perceived_task_complexity_value),  # only measured in the second experiment
    round == "Post",  # team factors are collected post-round only
    comm %in% c("Together_Chat", "Together_Jitsi"),  # relevant communication conditions
    task %in% c("Math", "HP")  # both tasks
  ) %>%
  mutate(
    task = factor(task, levels = c("Math", "HP")),
    comm = factor(comm, levels = c("Together_Chat", "Together_Jitsi"), 
                  labels = c("Chat", "Video")),  # nicer labels for output
    # 4-treatment variable (task x comm) for visualisation
    treatment = factor(paste(task, comm, sep = "-"),
                      levels = c("Math-Chat", "Math-Video", "HP-Chat", "HP-Video")),
    participant.code = factor(participant.code)
  ) %>%
  filter(rowSums(is.na(select(., all_of(team_factor_vars)))) <= 5)  # at most 5 of the 12 team factors missing (~42%)

print(paste("Mixed-Design Daten:", nrow(mixed_design_data), "Beobachtungen"))
## [1] "Mixed-Design Daten: 240 Beobachtungen"
# Design check: Task should be within-subjects (both tasks per person) and
# Communication between-subjects (exactly one comm form per person).
design_check <- mixed_design_data %>%
  group_by(participant.code) %>%
  summarise(
    n_tasks = n_distinct(task),
    n_comms = n_distinct(comm),
    comm_type = first(comm),
    .groups = "drop"
  )

cat("DESIGN VALIDATION:\n")
## DESIGN VALIDATION:
cat("- Personen mit beiden Tasks (Within-Subjects):", sum(design_check$n_tasks == 2), "\n")
## - Personen mit beiden Tasks (Within-Subjects): 120
cat("- Personen mit nur einem Task:", sum(design_check$n_tasks == 1), "\n")
## - Personen mit nur einem Task: 0
cat("- Personen mit beiden Comm-Formen (sollte 0 sein):", sum(design_check$n_comms == 2), "\n")
## - Personen mit beiden Comm-Formen (sollte 0 sein): 0
comm_distribution <- table(design_check$comm_type)
cat("- Between-Subjects Verteilung:\n")
## - Between-Subjects Verteilung:
print(comm_distribution)
## 
##  Chat Video 
##    60    60
# Overview of the four treatment cells (participants and observations each)
treatment_overview <- mixed_design_data %>%
  group_by(treatment, task, comm) %>%
  summarise(
    n_participants = n_distinct(participant.code),
    n_observations = n(),
    .groups = "drop"
  )

print("\n4-Treatment Übersicht (Mixed-Design):")
## [1] "\n4-Treatment Übersicht (Mixed-Design):"
print(treatment_overview)
## # A tibble: 4 × 5
##   treatment  task  comm  n_participants n_observations
##   <fct>      <fct> <fct>          <int>          <int>
## 1 Math-Chat  Math  Chat              60             60
## 2 Math-Video Math  Video             60             60
## 3 HP-Chat    HP    Chat              60             60
## 4 HP-Video   HP    Video             60             60
# ================================================================================
# SCHRITT 2: MIXED-EFFECTS 2x2 ANOVA FÜR JEDE TEAM-FAKTOR VARIABLE
# ================================================================================

print("\n=== SCHRITT 2: MIXED-EFFECTS 2x2 ANOVA ANALYSE ===")
## [1] "\n=== SCHRITT 2: MIXED-EFFECTS 2x2 ANOVA ANALYSE ==="
# Run a 2x2 mixed-design ANOVA (Task within-subjects x Communication
# between-subjects) for one team-factor variable.
#
# Fits lmer(var ~ task * comm + (1|participant.code)), prints the Type III
# ANOVA table, an approximate partial eta-squared per effect, and — when any
# effect is significant — estimated marginal means plus Bonferroni-adjusted
# pairwise comparisons via emmeans.
#
# Args:
#   var_name: column name (string) of the dependent variable in `data`.
#   data:     data frame with columns task, comm, treatment,
#             participant.code and the dependent variable.
#
# Returns: a list with the fitted model, ANOVA table, effect summary and
#   (if significant) emmeans/pairwise results; a fallback aov() result if
#   lmer fails; NULL when any of the 4 treatment cells has < 3 observations.
analyze_mixed_design_variable <- function(var_name, data) {
  cat("\n", paste(rep("=", 60), collapse=""), "\n")
  cat("MIXED-DESIGN 2x2 ANOVA FÜR:", toupper(var_name), "\n")
  cat(paste(rep("=", 60), collapse=""), "\n")
  
  # Drop rows with a missing outcome and remove now-unused factor levels
  var_data <- data %>%
    filter(!is.na(!!sym(var_name))) %>%
    mutate(
      treatment = droplevels(treatment),
      task = droplevels(task),
      comm = droplevels(comm)
    )
  
  # Sample size per treatment cell
  treatment_counts <- var_data %>%
    group_by(treatment) %>%
    summarise(n = n(), .groups = "drop")
  
  cat("Stichprobengrößen pro Treatment:\n")
  print(treatment_counts)
  
  # Require all 4 cells to be present with at least 3 observations each
  if(nrow(treatment_counts) >= 4 && all(treatment_counts$n >= 3)) {
    tryCatch({
      # Mixed-effects model: Communication (between) x Task (within) with a
      # random intercept per participant
      model <- lmer(as.formula(paste(var_name, "~ task * comm + (1|participant.code)")), 
                    data = var_data)
      
      anova_result <- anova(model)
      print(anova_result)
      
      # Collect F, dfs and p-value per effect into one table
      effects_summary <- data.frame(
        Effect = rownames(anova_result),
        F_value = round(anova_result$`F value`, 3),
        NumDF = anova_result$NumDF,
        DenDF = round(anova_result$DenDF, 1),
        p_value = round(anova_result$`Pr(>F)`, 4),
        stringsAsFactors = FALSE
      )
      
      # Approximate partial eta-squared from the F statistic:
      # eta2 = F*df1 / (F*df1 + df2)
      effects_summary$eta_squared <- NA
      for(i in 1:nrow(effects_summary)) {
        f_stat <- anova_result$`F value`[i]
        df1 <- anova_result$NumDF[i] 
        df2 <- anova_result$DenDF[i]
        
        if(!is.na(f_stat) && !is.na(df1) && !is.na(df2)) {
          eta_squared <- f_stat * df1 / (f_stat * df1 + df2)
          effects_summary$eta_squared[i] <- round(eta_squared, 3)
        }
      }
      
      cat("\nEFFEKTGRÖSSEN:\n")
      print(effects_summary)
      
      # Label each effect with significance stars and a size class
      # (trivial < .01 <= small < .06 <= medium < .14 <= large)
      for(i in 1:nrow(effects_summary)) {
        effect_name <- effects_summary$Effect[i]
        eta2 <- effects_summary$eta_squared[i]
        p_val <- effects_summary$p_value[i]
        
        significance <- ifelse(p_val < 0.001, "***", 
                              ifelse(p_val < 0.01, "**", 
                                    ifelse(p_val < 0.05, "*", "ns")))
        
        effect_size <- ifelse(is.na(eta2), "NA",
                             ifelse(eta2 < 0.01, "trivial",
                                   ifelse(eta2 < 0.06, "small", 
                                         ifelse(eta2 < 0.14, "medium", "large"))))
        
        cat("  ", effect_name, ": η² =", eta2, "(", effect_size, "),", 
            "p =", p_val, significance, "\n")
      }
      
      # Post-hoc tests only when at least one effect is significant
      significant_effects <- effects_summary$Effect[effects_summary$p_value < 0.05]
      
      if(length(significant_effects) > 0) {
        cat("\n🔍 SIGNIFIKANTE EFFEKTE:", paste(significant_effects, collapse = ", "), "\n")
        
        # Estimated marginal means for all 4 treatment cells
        emm_treatment <- emmeans(model, ~ task * comm)
        
        cat("\nEstimated Marginal Means:\n")
        print(emm_treatment)
        
        # Pairwise comparisons between all 4 treatments
        pairwise_treatment <- pairs(emm_treatment, adjust = "bonferroni")
        
        cat("\nPaarweise Treatment-Vergleiche (Bonferroni-korrigiert):\n")
        print(pairwise_treatment)
        
        # Main-effect contrasts, printed only for significant main effects
        if("task" %in% significant_effects) {
          emm_task <- emmeans(model, "task")
          cat("\nTask-Haupteffekt:\n")
          print(pairs(emm_task))
        }
        
        if("comm" %in% significant_effects) {
          emm_comm <- emmeans(model, "comm")
          cat("\nCommunication-Haupteffekt:\n")
          print(pairs(emm_comm))
        }
        
        return(list(
          model = model, 
          anova = anova_result, 
          effects_summary = effects_summary,
          emmeans = emm_treatment,
          pairwise = pairwise_treatment, 
          significant = TRUE,
          significant_effects = significant_effects
        ))
      }
      
      return(list(
        model = model, 
        anova = anova_result, 
        effects_summary = effects_summary,
        significant = FALSE,
        significant_effects = c()
      ))
      
    }, error = function(e) {
      cat("❌ Fehler bei Mixed-Effects ANOVA:", e$message, "\n")
      
      # Fallback: plain two-way ANOVA when the mixed model fails
      tryCatch({
        cat("Versuche einfache ANOVA als Fallback...\n")
        simple_model <- aov(as.formula(paste(var_name, "~ task * comm")), data = var_data)
        simple_anova <- summary(simple_model)
        print(simple_anova)
        
        return(list(
          model = simple_model,
          anova = simple_anova,
          fallback = TRUE,
          significant = FALSE
        ))
      }, error = function(e2) {
        cat("❌ Auch einfache ANOVA fehlgeschlagen:", e2$message, "\n")
        return(NULL)
      })
    })
  } else {
    cat("⚠️ Nicht genügend Daten für alle 4 Treatments\n")
    print(treatment_counts)
    return(NULL)
  }
}

# Run the mixed-design 2x2 ANOVA for every team factor; keep the non-NULL
# results (variables with enough data) keyed by variable name.
mixed_design_results <- list()
for (factor_var in team_factor_vars) {
  analysis <- analyze_mixed_design_variable(factor_var, mixed_design_data)
  if (!is.null(analysis)) {
    mixed_design_results[[factor_var]] <- analysis
  }
}
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: TEAM_COMPOSITION_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value  Pr(>F)  
## task      0.54573 0.54573     1   118  4.8849 0.02902 *
## comm      0.05549 0.05549     1   118  0.4967 0.48234  
## task:comm 0.04326 0.04326     1   118  0.3872 0.53495  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   4.885     1   118  0.0290       0.040
## 2      comm   0.497     1   118  0.4823       0.004
## 3 task:comm   0.387     1   118  0.5350       0.003
##    task : η² = 0.04 ( small ), p = 0.029 * 
##    comm : η² = 0.004 ( trivial ), p = 0.4823 ns 
##    task:comm : η² = 0.003 ( trivial ), p = 0.535 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task 
## 
## Estimated Marginal Means:
##  task comm  emmean     SE  df lower.CL upper.CL
##  Math Chat    3.65 0.0595 193     3.53     3.76
##  HP   Chat    3.71 0.0595 193     3.60     3.83
##  Math Video   3.57 0.0595 193     3.45     3.69
##  HP   Video   3.69 0.0595 193     3.57     3.81
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate     SE  df t.ratio p.value
##  Math Chat - HP Chat     -0.0685 0.0610 118  -1.123  1.0000
##  Math Chat - Math Video   0.0778 0.0842 193   0.924  1.0000
##  Math Chat - HP Video    -0.0444 0.0842 193  -0.528  1.0000
##  HP Chat - Math Video     0.1463 0.0842 193   1.738  0.5026
##  HP Chat - HP Video       0.0241 0.0842 193   0.286  1.0000
##  Math Video - HP Video   -0.1222 0.0610 118  -2.003  0.2849
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP  -0.0954 0.0432 118  -2.210  0.0290
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: TEAM_MOTIVATION_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## task      0.77824 0.77824     1   118  2.3165 0.1307
## comm      0.02492 0.02492     1   118  0.0742 0.7858
## task:comm 0.07824 0.07824     1   118  0.2329 0.6303
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   2.316     1   118  0.1307       0.019
## 2      comm   0.074     1   118  0.7858       0.001
## 3 task:comm   0.233     1   118  0.6303       0.002
##    task : η² = 0.019 ( small ), p = 0.1307 ns 
##    comm : η² = 0.001 ( trivial ), p = 0.7858 ns 
##    task:comm : η² = 0.002 ( trivial ), p = 0.6303 ns 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: INTERDEPENDENCE_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##           Sum Sq Mean Sq NumDF DenDF F value  Pr(>F)  
## task      7.7042  7.7042     1   118  3.7279 0.05591 .
## comm      0.2953  0.2953     1   118  0.1429 0.70612  
## task:comm 0.9375  0.9375     1   118  0.4536 0.50193  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   3.728     1   118  0.0559       0.031
## 2      comm   0.143     1   118  0.7061       0.001
## 3 task:comm   0.454     1   118  0.5019       0.004
##    task : η² = 0.031 ( small ), p = 0.0559 ns 
##    comm : η² = 0.001 ( trivial ), p = 0.7061 ns 
##    task:comm : η² = 0.004 ( trivial ), p = 0.5019 ns 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: COMMON_GOAL_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value  Pr(>F)  
## task      2.43345 2.43345     1   118  5.1604 0.02492 *
## comm      0.69578 0.69578     1   118  1.4755 0.22691  
## task:comm 0.15845 0.15845     1   118  0.3360 0.56325  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   5.160     1   118  0.0249       0.042
## 2      comm   1.475     1   118  0.2269       0.012
## 3 task:comm   0.336     1   118  0.5632       0.003
##    task : η² = 0.042 ( small ), p = 0.0249 * 
##    comm : η² = 0.012 ( small ), p = 0.2269 ns 
##    task:comm : η² = 0.003 ( trivial ), p = 0.5632 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task 
## 
## Estimated Marginal Means:
##  task comm  emmean    SE  df lower.CL upper.CL
##  Math Chat    4.76 0.108 213     4.54     4.97
##  HP   Chat    4.91 0.108 213     4.69     5.12
##  Math Video   4.86 0.108 213     4.64     5.07
##  HP   Video   5.11 0.108 213     4.90     5.32
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat      -0.150 0.125 118  -1.196  1.0000
##  Math Chat - Math Video   -0.100 0.153 213  -0.654  1.0000
##  Math Chat - HP Video     -0.353 0.153 213  -2.307  0.1322
##  HP Chat - Math Video      0.050 0.153 213   0.327  1.0000
##  HP Chat - HP Video       -0.203 0.153 213  -1.326  1.0000
##  Math Video - HP Video    -0.253 0.125 118  -2.016  0.2763
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP   -0.201 0.0887 118  -2.272  0.0249
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: MEANS_COORDINATION_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##           Sum Sq Mean Sq NumDF DenDF F value    Pr(>F)    
## task      15.504  15.504     1   118  8.6769  0.003883 ** 
## comm      32.108  32.108     1   118 17.9692 4.481e-05 ***
## task:comm  2.400   2.400     1   118  1.3432  0.248817    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   8.677     1   118  0.0039       0.068
## 2      comm  17.969     1   118  0.0000       0.132
## 3 task:comm   1.343     1   118  0.2488       0.011
##    task : η² = 0.068 ( medium ), p = 0.0039 ** 
##    comm : η² = 0.132 ( medium ), p = 0 *** 
##    task:comm : η² = 0.011 ( small ), p = 0.2488 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task, comm 
## 
## Estimated Marginal Means:
##  task comm  emmean    SE  df lower.CL upper.CL
##  Math Chat    4.00 0.188 230     3.63     4.37
##  HP   Chat    4.71 0.188 230     4.34     5.08
##  Math Video   5.06 0.188 230     4.69     5.43
##  HP   Video   5.37 0.188 230     5.00     5.74
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat      -0.708 0.244 118  -2.902  0.0265
##  Math Chat - Math Video   -1.058 0.266 230  -3.978  0.0006
##  Math Chat - HP Video     -1.367 0.266 230  -5.137  <.0001
##  HP Chat - Math Video     -0.350 0.266 230  -1.316  1.0000
##  HP Chat - HP Video       -0.658 0.266 230  -2.475  0.0844
##  Math Video - HP Video    -0.308 0.244 118  -1.263  1.0000
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP   -0.508 0.173 118  -2.946  0.0039
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger
## NOTE: Results may be misleading due to involvement in interactions
## 
## Communication-Haupteffekt:
##  contrast     estimate    SE  df t.ratio p.value
##  Chat - Video   -0.858 0.202 118  -4.239  <.0001
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: GROUP_SIZE_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##             Sum Sq  Mean Sq NumDF DenDF F value Pr(>F)
## task      0.066667 0.066667     1   118  0.3482 0.5563
## comm      0.267352 0.267352     1   118  1.3964 0.2397
## task:comm 0.007407 0.007407     1   118  0.0387 0.8444
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   0.348     1   118  0.5563       0.003
## 2      comm   1.396     1   118  0.2397       0.012
## 3 task:comm   0.039     1   118  0.8444       0.000
##    task : η² = 0.003 ( trivial ), p = 0.5563 ns 
##    comm : η² = 0.012 ( small ), p = 0.2397 ns 
##    task:comm : η² = 0 ( trivial ), p = 0.8444 ns 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: GROUP_DIVERSITY_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value  Pr(>F)  
## task      2.14074 2.14074     1   118  4.6240 0.03357 *
## comm      0.01634 0.01634     1   118  0.0353 0.85131  
## task:comm 0.00741 0.00741     1   118  0.0160 0.89956  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   4.624     1   118  0.0336       0.038
## 2      comm   0.035     1   118  0.8513       0.000
## 3 task:comm   0.016     1   118  0.8996       0.000
##    task : η² = 0.038 ( small ), p = 0.0336 * 
##    comm : η² = 0 ( trivial ), p = 0.8513 ns 
##    task:comm : η² = 0 ( trivial ), p = 0.8996 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task 
## 
## Estimated Marginal Means:
##  task comm  emmean    SE  df lower.CL upper.CL
##  Math Chat    3.13 0.104 218     2.93     3.34
##  HP   Chat    3.33 0.104 218     3.13     3.54
##  Math Video   3.17 0.104 218     2.96     3.37
##  HP   Video   3.34 0.104 218     3.14     3.55
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat     -0.2000 0.124 118  -1.610  0.6605
##  Math Chat - Math Video  -0.0333 0.147 218  -0.226  1.0000
##  Math Chat - HP Video    -0.2111 0.147 218  -1.433  0.9201
##  HP Chat - Math Video     0.1667 0.147 218   1.131  1.0000
##  HP Chat - HP Video      -0.0111 0.147 218  -0.075  1.0000
##  Math Video - HP Video   -0.1778 0.124 118  -1.431  0.9303
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate     SE  df t.ratio p.value
##  Math - HP   -0.189 0.0878 118  -2.150  0.0336
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: GROUP_SKILL_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## task      0.24491 0.24491     1   118  0.8133 0.3690
## comm      0.17067 0.17067     1   118  0.5668 0.4530
## task:comm 0.38935 0.38935     1   118  1.2930 0.2578
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   0.813     1   118  0.3690       0.007
## 2      comm   0.567     1   118  0.4530       0.005
## 3 task:comm   1.293     1   118  0.2578       0.011
##    task : η² = 0.007 ( trivial ), p = 0.369 ns 
##    comm : η² = 0.005 ( trivial ), p = 0.453 ns 
##    task:comm : η² = 0.011 ( small ), p = 0.2578 ns 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: COMMUNICATION_REQUIRED_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value    Pr(>F)    
## task      199.837 199.837     1   118 97.9847 < 2.2e-16 ***
## comm       32.824  32.824     1   118 16.0945 0.0001061 ***
## task:comm   4.004   4.004     1   118  1.9633 0.1637822    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task  97.985     1   118  0.0000       0.454
## 2      comm  16.094     1   118  0.0001       0.120
## 3 task:comm   1.963     1   118  0.1638       0.016
##    task : η² = 0.454 ( large ), p = 0 *** 
##    comm : η² = 0.12 ( medium ), p = 1e-04 *** 
##    task:comm : η² = 0.016 ( small ), p = 0.1638 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task, comm 
## 
## Estimated Marginal Means:
##  task comm  emmean  SE  df lower.CL upper.CL
##  Math Chat    3.50 0.2 231     3.11     3.89
##  HP   Chat    5.58 0.2 231     5.19     5.98
##  Math Video   4.62 0.2 231     4.22     5.01
##  HP   Video   6.18 0.2 231     5.79     6.58
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat      -2.083 0.261 118  -7.990  <.0001
##  Math Chat - Math Video   -1.117 0.282 231  -3.954  0.0006
##  Math Chat - HP Video     -2.683 0.282 231  -9.501  <.0001
##  HP Chat - Math Video      0.967 0.282 231   3.423  0.0044
##  HP Chat - HP Video       -0.600 0.282 231  -2.124  0.2082
##  Math Video - HP Video    -1.567 0.261 118  -6.009  <.0001
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP    -1.82 0.184 118  -9.899  <.0001
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger
## NOTE: Results may be misleading due to involvement in interactions
## 
## Communication-Haupteffekt:
##  contrast     estimate    SE  df t.ratio p.value
##  Chat - Video   -0.858 0.214 118  -4.012  0.0001
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: WORK_INDEPENDENCE_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value    Pr(>F)    
## task      112.067 112.067     1   118 49.7543 1.277e-10 ***
## comm        6.906   6.906     1   118  3.0658   0.08255 .  
## task:comm   0.150   0.150     1   118  0.0666   0.79681    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task  49.754     1   118  0.0000       0.297
## 2      comm   3.066     1   118  0.0826       0.025
## 3 task:comm   0.067     1   118  0.7968       0.001
##    task : η² = 0.297 ( large ), p = 0 *** 
##    comm : η² = 0.025 ( small ), p = 0.0826 ns 
##    task:comm : η² = 0.001 ( trivial ), p = 0.7968 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task 
## 
## Estimated Marginal Means:
##  task comm  emmean    SE  df lower.CL upper.CL
##  Math Chat    4.43 0.233 215     3.97     4.89
##  HP   Chat    3.02 0.233 215     2.56     3.48
##  Math Video   3.92 0.233 215     3.46     4.38
##  HP   Video   2.60 0.233 215     2.14     3.06
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat       1.417 0.274 118   5.170  <.0001
##  Math Chat - Math Video    0.517 0.330 215   1.568  0.7101
##  Math Chat - HP Video      1.833 0.330 215   5.564  <.0001
##  HP Chat - Math Video     -0.900 0.330 215  -2.731  0.0410
##  HP Chat - HP Video        0.417 0.330 215   1.265  1.0000
##  Math Video - HP Video     1.317 0.274 118   4.805  <.0001
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP     1.37 0.194 118   7.054  <.0001
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: SOCIAL_PRESENCE_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##           Sum Sq Mean Sq NumDF DenDF F value   Pr(>F)   
## task      8.7402  8.7402     1   118  7.8273 0.006012 **
## comm      5.6847  5.6847     1   118  5.0910 0.025892 * 
## task:comm 3.0375  3.0375     1   118  2.7202 0.101743   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   7.827     1   118  0.0060       0.062
## 2      comm   5.091     1   118  0.0259       0.041
## 3 task:comm   2.720     1   118  0.1017       0.023
##    task : η² = 0.062 ( medium ), p = 0.006 ** 
##    comm : η² = 0.041 ( small ), p = 0.0259 * 
##    task:comm : η² = 0.023 ( small ), p = 0.1017 ns 
## 
## 🔍 SIGNIFIKANTE EFFEKTE: task, comm 
## 
## Estimated Marginal Means:
##  task comm  emmean    SE  df lower.CL upper.CL
##  Math Chat    4.16 0.194 188     3.77     4.54
##  HP   Chat    4.76 0.194 188     4.38     5.15
##  Math Video   4.92 0.194 188     4.54     5.30
##  HP   Video   5.08 0.194 188     4.69     5.46
## 
## Degrees-of-freedom method: kenward-roger 
## Confidence level used: 0.95 
## 
## Paarweise Treatment-Vergleiche (Bonferroni-korrigiert):
##  contrast               estimate    SE  df t.ratio p.value
##  Math Chat - HP Chat      -0.607 0.193 118  -3.145  0.0126
##  Math Chat - Math Video   -0.763 0.275 188  -2.777  0.0362
##  Math Chat - HP Video     -0.920 0.275 188  -3.347  0.0059
##  HP Chat - Math Video     -0.157 0.275 188  -0.570  1.0000
##  HP Chat - HP Video       -0.313 0.275 188  -1.140  1.0000
##  Math Video - HP Video    -0.157 0.193 118  -0.812  1.0000
## 
## Degrees-of-freedom method: kenward-roger 
## P value adjustment: bonferroni method for 6 tests
## NOTE: Results may be misleading due to involvement in interactions
## 
## Task-Haupteffekt:
##  contrast  estimate    SE  df t.ratio p.value
##  Math - HP   -0.382 0.136 118  -2.798  0.0060
## 
## Results are averaged over the levels of: comm 
## Degrees-of-freedom method: kenward-roger
## NOTE: Results may be misleading due to involvement in interactions
## 
## Communication-Haupteffekt:
##  contrast     estimate    SE  df t.ratio p.value
##  Chat - Video   -0.538 0.239 118  -2.256  0.0259
## 
## Results are averaged over the levels of: task 
## Degrees-of-freedom method: kenward-roger 
## 
##  ============================================================ 
## MIXED-DESIGN 2x2 ANOVA FÜR: PERCEIVED_TASK_COMPLEXITY_VALUE 
## ============================================================ 
## Stichprobengrößen pro Treatment:
## # A tibble: 4 × 2
##   treatment      n
##   <fct>      <int>
## 1 Math-Chat     60
## 2 Math-Video    60
## 3 HP-Chat       60
## 4 HP-Video      60
## Type III Analysis of Variance Table with Satterthwaite's method
##            Sum Sq Mean Sq NumDF DenDF F value Pr(>F)
## task      0.13776 0.13776     1   118  0.1220 0.7275
## comm      0.06222 0.06222     1   118  0.0551 0.8148
## task:comm 1.79401 1.79401     1   118  1.5890 0.2100
## 
## EFFEKTGRÖSSEN:
##      Effect F_value NumDF DenDF p_value eta_squared
## 1      task   0.122     1   118  0.7275       0.001
## 2      comm   0.055     1   118  0.8148       0.000
## 3 task:comm   1.589     1   118  0.2100       0.013
##    task : η² = 0.001 ( trivial ), p = 0.7275 ns 
##    comm : η² = 0 ( trivial ), p = 0.8148 ns 
##    task:comm : η² = 0.013 ( small ), p = 0.21 ns
# ================================================================================
# SCHRITT 3: ERGEBNISTABELLE ERSTELLEN
# ================================================================================

# FIX: print() renders the escape "\n" as the two literal characters backslash-n
# (see the rendered output directly below); cat() emits a real newline.
cat("\n=== SCHRITT 3: MIXED-DESIGN ERGEBNISTABELLE ===\n")
## [1] "\n=== SCHRITT 3: MIXED-DESIGN ERGEBNISTABELLE ==="
create_mixed_design_results_table <- function() {
  # Build one row per team factor with F / eta^2 / p for the task,
  # communication and interaction terms, plus sample-size information.
  # Relies on the globals team_factor_vars, mixed_design_results,
  # mixed_design_data and variable_labels defined earlier in this document.
  out <- data.frame(
    Variable = team_factor_vars,
    N_total = 0,
    N_treatments = 0,
    Task_F = NA_real_,
    Task_eta2 = NA_real_,
    Task_p = NA_real_,
    Comm_F = NA_real_,
    Comm_eta2 = NA_real_,
    Comm_p = NA_real_,
    Interaction_F = NA_real_,
    Interaction_eta2 = NA_real_,
    Interaction_p = NA_real_,
    Any_Significant = FALSE,
    Analysis_Type = "Mixed-Effects ANOVA",
    stringsAsFactors = FALSE
  )

  for (var in names(mixed_design_results)) {
    idx <- which(out$Variable == var)
    res <- mixed_design_results[[var]]

    # A result that carries a fallback flag came from the simple-ANOVA path.
    if (!is.null(res$effects_summary) && !is.null(res$fallback)) {
      out$Analysis_Type[idx] <- "Simple ANOVA (Fallback)"
    }

    if (!is.null(res$effects_summary)) {
      eff <- res$effects_summary

      # Copy F, eta^2 and p for each of the three model terms.
      for (term in c("task", "comm", "task:comm")) {
        term_row <- which(eff$Effect == term)
        if (length(term_row) > 0) {
          prefix <- switch(term, task = "Task", comm = "Comm", "Interaction")
          out[[paste0(prefix, "_F")]][idx] <- eff$F_value[term_row]
          out[[paste0(prefix, "_eta2")]][idx] <- eff$eta_squared[term_row]
          out[[paste0(prefix, "_p")]][idx] <- eff$p_value[term_row]
        }
      }

      out$Any_Significant[idx] <- res$significant
    }

    # Sample sizes: observations with a non-missing value for this variable.
    non_missing <- mixed_design_data %>% filter(!is.na(.data[[var]]))
    out$N_total[idx] <- nrow(non_missing)
    out$N_treatments[idx] <- n_distinct(non_missing$treatment)
  }

  # Human-readable labels; fall back to the raw variable name.
  clean <- variable_labels[out$Variable]
  out$Variable_Clean <- ifelse(is.na(clean), out$Variable, clean)

  # Significant variables first, then by descending task effect size.
  out %>%
    arrange(desc(Any_Significant), desc(Task_eta2))
}

# Materialise the results table once; it is reused by all later summaries.
mixed_design_results_table <- create_mixed_design_results_table()

# Console header for the table printed below.
print("MIXED-DESIGN FACTORIAL ERGEBNISTABELLE:")
## [1] "MIXED-DESIGN FACTORIAL ERGEBNISTABELLE:"
# Show only the key statistics columns of the results table.
mixed_design_results_table %>%
  select(
    Variable_Clean, N_total, Any_Significant, Analysis_Type,
    Task_p, Task_eta2, Comm_p, Comm_eta2,
    Interaction_p, Interaction_eta2
  ) %>%
  print()
##                    Variable_Clean N_total Any_Significant       Analysis_Type
## 1          Communication Required     240            TRUE Mixed-Effects ANOVA
## 2               Work Independence     240            TRUE Mixed-Effects ANOVA
## 3           Coordination Possible     240            TRUE Mixed-Effects ANOVA
## 4                 Social Presence     240            TRUE Mixed-Effects ANOVA
## 5              Clear Goal Process     240            TRUE Mixed-Effects ANOVA
## 6                Team Composition     240            TRUE Mixed-Effects ANOVA
## 7  Adequate Perspective Diversity     240            TRUE Mixed-Effects ANOVA
## 8               Mutual Dependence     240           FALSE Mixed-Effects ANOVA
## 9                 Team Motivation     240           FALSE Mixed-Effects ANOVA
## 10           Complementary Skills     240           FALSE Mixed-Effects ANOVA
## 11          Group Size Just Right     240           FALSE Mixed-Effects ANOVA
## 12      Perceived Task Complexity     240           FALSE Mixed-Effects ANOVA
##    Task_p Task_eta2 Comm_p Comm_eta2 Interaction_p Interaction_eta2
## 1  0.0000     0.454 0.0001     0.120        0.1638            0.016
## 2  0.0000     0.297 0.0826     0.025        0.7968            0.001
## 3  0.0039     0.068 0.0000     0.132        0.2488            0.011
## 4  0.0060     0.062 0.0259     0.041        0.1017            0.023
## 5  0.0249     0.042 0.2269     0.012        0.5632            0.003
## 6  0.0290     0.040 0.4823     0.004        0.5350            0.003
## 7  0.0336     0.038 0.8513     0.000        0.8996            0.000
## 8  0.0559     0.031 0.7061     0.001        0.5019            0.004
## 9  0.1307     0.019 0.7858     0.001        0.6303            0.002
## 10 0.3690     0.007 0.4530     0.005        0.2578            0.011
## 11 0.5563     0.003 0.2397     0.012        0.8444            0.000
## 12 0.7275     0.001 0.8148     0.000        0.2100            0.013
# ================================================================================
# SCHRITT 4: 4-TREATMENT BOXPLOTS
# ================================================================================

# FIX: cat() instead of print() so the leading "\n" is emitted as an actual
# newline rather than the literal characters \n (visible in the old output).
cat("\n=== SCHRITT 4: 4-TREATMENT BOXPLOTS (MIXED-DESIGN) ===\n")
## [1] "\n=== SCHRITT 4: 4-TREATMENT BOXPLOTS (MIXED-DESIGN) ==="
# Reshape the wide analysis data into long format for plotting.
prepare_mixed_design_plot_data <- function() {
  # One row per participant x team-factor variable; rows with missing
  # responses are dropped and a readable label is attached.
  long_data <- mixed_design_data %>%
    select(participant.code, treatment, task, comm, all_of(team_factor_vars)) %>%
    pivot_longer(
      cols = all_of(team_factor_vars),
      names_to = "variable",
      values_to = "value"
    ) %>%
    filter(!is.na(value))

  long_data %>%
    mutate(
      variable_clean = variable_labels[variable],
      variable_clean = ifelse(is.na(variable_clean), variable, variable_clean)
    )
}

mixed_design_plot_data <- prepare_mixed_design_plot_data()

# Erstelle 4-Treatment Boxplot Funktion
# Boxplot of one team factor across the four treatments (task x communication).
#
# var_name: raw variable name to plot (must appear in data$variable).
# data:     long-format plot data with columns variable, variable_clean,
#           task, comm, treatment, value.
# Returns a ggplot object, or NULL when the variable has no rows.
create_mixed_design_boxplot <- function(var_name, data = mixed_design_plot_data) {
  var_data <- data %>% filter(variable == var_name)
  
  if(nrow(var_data) == 0) return(NULL)
  
  var_label <- unique(var_data$variable_clean)[1]
  
  # One colour per task; communication mode is distinguished via alpha below.
  colors <- c("Math-Chat" = "#1F78B4", "Math-Video" = "#1F78B4", 
              "HP-Chat" = "#E31A1C", "HP-Video" = "#E31A1C")
  
  # Per-treatment n for the caption.
  sample_sizes <- var_data %>%
    group_by(treatment) %>%
    summarise(n = n(), .groups = "drop")
  
  # Positions for the "Chat"/"Video" labels underneath the dodged boxes.
  comm_labels <- data.frame(
    task = c("Math", "Math", "HP", "HP"),
    comm = c("Chat", "Video", "Chat", "Video"),
    treatment = c("Math-Chat", "Math-Video", "HP-Chat", "HP-Video"),
    y_pos = 0.7
  )
  
  ggplot(var_data, aes(x = task, y = value, fill = treatment)) +
    geom_boxplot(aes(alpha = comm), 
                 position = position_dodge(width = 0.8), 
                 outlier.size = 1.5, 
                 width = 0.7) +
    geom_jitter(aes(color = treatment), 
                position = position_jitterdodge(dodge.width = 0.8, jitter.width = 0.2), 
                alpha = 0.5, size = 1.2) +
    # FIX: geom_text() has no `fill` aesthetic — the previous version emitted
    # "Ignoring unknown aesthetics: fill" for every plot. `group` is the
    # aesthetic position_dodge() uses to line the labels up with the boxes.
    geom_text(data = comm_labels,
              aes(x = task, y = y_pos, label = comm, group = treatment),
              position = position_dodge(width = 0.8),
              size = 3, fontface = "bold", color = "black",
              inherit.aes = FALSE) +
    scale_fill_manual(values = colors, guide = "none") +
    scale_color_manual(values = colors, guide = "none") +
    scale_alpha_manual(values = c("Chat" = 0.6, "Video" = 1.0), guide = "none") +
    scale_y_continuous(limits = c(0.5, 7), breaks = 1:7) +
    labs(
      title = var_label,
      subtitle = "Mixed-Design: Communication (Between) × Task (Within)",
      x = "Task Type",
      y = "Response (7-Point Likert)",
      caption = paste("Sample sizes - Math-Chat:", 
                     sample_sizes$n[sample_sizes$treatment == "Math-Chat"],
                     ", Math-Video:", 
                     sample_sizes$n[sample_sizes$treatment == "Math-Video"],
                     ", HP-Chat:", 
                     sample_sizes$n[sample_sizes$treatment == "HP-Chat"],
                     ", HP-Video:", 
                     sample_sizes$n[sample_sizes$treatment == "HP-Video"])
    ) +
    theme_minimal() +
    theme(
      plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
      plot.subtitle = element_text(size = 9, hjust = 0.5, color = "gray50"),
      plot.caption = element_text(size = 8, hjust = 0.5, color = "gray60"),
      axis.text.x = element_text(size = 10, face = "bold"),
      axis.text.y = element_text(size = 9),
      axis.title = element_text(size = 10, face = "bold"),
      legend.position = "none",  # all aesthetics are explained by the labels
      panel.grid.major.x = element_blank(),
      panel.grid.minor = element_blank()
    )
}

# Generate one mixed-design boxplot per team factor that has plot data.
mixed_design_boxplots <- list()
available_vars <- unique(mixed_design_plot_data$variable)
for (var in team_factor_vars) {
  if (!(var %in% available_vars)) next
  cat("Erstelle Mixed-Design Boxplot für:", var, "\n")
  p <- create_mixed_design_boxplot(var, mixed_design_plot_data)
  if (!is.null(p)) {
    mixed_design_boxplots[[var]] <- p
  }
}
## Erstelle Mixed-Design Boxplot für: team_composition_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: team_motivation_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: interdependence_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: common_goal_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: means_coordination_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: group_size_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: group_diversity_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: group_skill_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: communication_required_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: work_independence_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: social_presence_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
## Erstelle Mixed-Design Boxplot für: perceived_task_complexity_value
## Warning in geom_text(data = data.frame(task = c("Math", "Math", "HP", "HP"), :
## Ignoring unknown aesthetics: fill
# Render each generated boxplot, announcing it with its cleaned label.
if (length(mixed_design_boxplots) > 0) {
  cat("\n=== EINZELNE 4-TREATMENT BOXPLOTS ===\n")

  for (var in names(mixed_design_boxplots)) {
    label <- variable_labels[var]
    label <- ifelse(is.na(label), var, label)

    cat("Zeige Mixed-Design Boxplot für:", label, "\n")
    print(mixed_design_boxplots[[var]])
  }

  cat("\nAlle", length(mixed_design_boxplots), "Mixed-Design Boxplots angezeigt.\n")
}
## 
## === EINZELNE 4-TREATMENT BOXPLOTS ===
## Zeige Mixed-Design Boxplot für: Team Composition

## Zeige Mixed-Design Boxplot für: Team Motivation

## Zeige Mixed-Design Boxplot für: Mutual Dependence

## Zeige Mixed-Design Boxplot für: Clear Goal Process

## Zeige Mixed-Design Boxplot für: Coordination Possible

## Zeige Mixed-Design Boxplot für: Group Size Just Right

## Zeige Mixed-Design Boxplot für: Adequate Perspective Diversity

## Zeige Mixed-Design Boxplot für: Complementary Skills

## Zeige Mixed-Design Boxplot für: Communication Required

## Zeige Mixed-Design Boxplot für: Work Independence

## Zeige Mixed-Design Boxplot für: Social Presence

## Zeige Mixed-Design Boxplot für: Perceived Task Complexity

## 
## Alle 12 Mixed-Design Boxplots angezeigt.
# ================================================================================
# SCHRITT 5: ZUSAMMENFASSUNG UND INTERPRETATION
# ================================================================================

# strrep() builds the same 80-character rule as paste(rep("=", 80), collapse = "").
cat("\n", strrep("=", 80), "\n")
## 
##  ================================================================================
cat("MIXED-DESIGN FACTORIAL ANALYSE ZUSAMMENFASSUNG\n")
## MIXED-DESIGN FACTORIAL ANALYSE ZUSAMMENFASSUNG
cat(paste(rep("=", 80), collapse=""), "\n")
## ================================================================================
# Count variables with any significant effect and tally each effect type
# at the alpha = .05 level (NA p-values are ignored).
significant_vars <- mixed_design_results_table %>%
  filter(Any_Significant) %>%
  pull(Variable)
task_effects <- with(mixed_design_results_table, sum(Task_p < 0.05, na.rm = TRUE))
comm_effects <- with(mixed_design_results_table, sum(Comm_p < 0.05, na.rm = TRUE))
interaction_effects <- with(mixed_design_results_table, sum(Interaction_p < 0.05, na.rm = TRUE))

cat("📊 MIXED-DESIGN ERGEBNISÜBERSICHT:\n")
## 📊 MIXED-DESIGN ERGEBNISÜBERSICHT:
cat("- Analysierte Team-Faktoren:", nrow(mixed_design_results_table), "\n")
## - Analysierte Team-Faktoren: 12
cat("- Variablen mit signifikanten Effekten:", length(significant_vars), "\n")
## - Variablen mit signifikanten Effekten: 7
cat("- Task-Haupteffekte (Within-Subjects):", task_effects, "\n")
## - Task-Haupteffekte (Within-Subjects): 7
cat("- Communication-Haupteffekte (Between-Subjects):", comm_effects, "\n")
## - Communication-Haupteffekte (Between-Subjects): 3
cat("- Task × Communication Interaktionen:", interaction_effects, "\n\n")
## - Task × Communication Interaktionen: 0
if (length(significant_vars) > 0) {
  cat("✅ TEAM-FAKTOREN MIT SIGNIFIKANTEN EFFEKTEN:\n")

  # Conventional eta^2 size labels with cutoffs at .06 and .14.
  size_label <- function(eta2) {
    ifelse(eta2 < 0.06, "small", ifelse(eta2 < 0.14, "medium", "large"))
  }

  for (var in significant_vars) {
    row <- mixed_design_results_table[mixed_design_results_table$Variable == var, ]

    # Collect a short description for every significant term of this variable.
    effects <- character(0)
    if (!is.na(row$Task_p) && row$Task_p < 0.05) {
      effects <- c(effects, paste("Task (η²=", row$Task_eta2, size_label(row$Task_eta2), ")"))
    }
    if (!is.na(row$Comm_p) && row$Comm_p < 0.05) {
      effects <- c(effects, paste("Comm (η²=", row$Comm_eta2, size_label(row$Comm_eta2), ")"))
    }
    if (!is.na(row$Interaction_p) && row$Interaction_p < 0.05) {
      effects <- c(effects, paste("Interaction (η²=", row$Interaction_eta2, size_label(row$Interaction_eta2), ")"))
    }

    cat("  -", row$Variable_Clean, ":", paste(effects, collapse = ", "), "\n")
  }
  cat("\n")
} else {
  cat("❌ Keine signifikanten Effekte in den Team-Faktoren gefunden.\n\n")
}
## ✅ TEAM-FAKTOREN MIT SIGNIFIKANTEN EFFEKTEN:
##   - Communication Required : Task (η²= 0.454 large ), Comm (η²= 0.12 medium ) 
##   - Work Independence : Task (η²= 0.297 large ) 
##   - Coordination Possible : Task (η²= 0.068 medium ), Comm (η²= 0.132 medium ) 
##   - Social Presence : Task (η²= 0.062 medium ), Comm (η²= 0.041 small ) 
##   - Clear Goal Process : Task (η²= 0.042 small ) 
##   - Team Composition : Task (η²= 0.04 small ) 
##   - Adequate Perspective Diversity : Task (η²= 0.038 small )
cat("💡 MIXED-DESIGN INTERPRETATION:\n")
## 💡 MIXED-DESIGN INTERPRETATION:
# Interpretation hint, shown only when at least one within-subjects task
# main effect reached significance.
if(task_effects > 0) {
  cat("✅ Task-Effekte (Within-Subjects): Math und HP Tasks unterscheiden sich strukturell\n")
  cat("   → Personen bewerten die Tasks unterschiedlich (hohe statistische Power)\n")
}
## ✅ Task-Effekte (Within-Subjects): Math und HP Tasks unterscheiden sich strukturell
##    → Personen bewerten die Tasks unterschiedlich (hohe statistische Power)
# Interpretation hint, shown only when at least one between-subjects
# communication main effect reached significance.
if(comm_effects > 0) {
  cat("✅ Communication-Effekte (Between-Subjects): Chat und Video wirken unterschiedlich\n")
  cat("   → Kommunikationsmedium beeinflusst Team-Wahrnehmung\n")
}
## ✅ Communication-Effekte (Between-Subjects): Chat und Video wirken unterschiedlich
##    → Kommunikationsmedium beeinflusst Team-Wahrnehmung
# Shown only when a task x communication interaction is significant.
if(interaction_effects > 0) {
  cat("✅ Interaktions-Effekte: Task und Communication beeinflussen sich gegenseitig\n")
  cat("   → Der Kommunikationseffekt hängt vom Task ab (oder umgekehrt)\n")
}

# Fallback message when no effect of any kind reached significance.
if(task_effects == 0 && comm_effects == 0 && interaction_effects == 0) {
  cat("ℹ️ Keine signifikanten Unterschiede zwischen den 4 Treatments gefunden.\n")
  cat("   Mögliche Erklärungen:\n")
  cat("   - Team-Faktoren sind robust gegenüber Task- und Kommunikations-Variationen\n")
  cat("   - Effekte sind zu klein für die aktuelle Stichprobengröße\n")
  cat("   - Andere Faktoren (individuelle Unterschiede) überlagern Treatment-Effekte\n")
}

# Design-Validierung Zusammenfassung
# Number of distinct participants in the analysis data set.
total_participants <- mixed_design_data %>% 
  summarise(n_unique = n_distinct(participant.code)) %>% 
  pull(n_unique)

# NOTE(review): design_check is created earlier in the document; this assumes
# it holds one row per participant with n_tasks / n_comms counts — confirm upstream.
within_subjects_complete <- sum(design_check$n_tasks == 2)
between_subjects_clean <- sum(design_check$n_comms == 1)

cat("\n📋 DESIGN-VALIDIERUNG:\n")
## 
## 📋 DESIGN-VALIDIERUNG:
cat("- Gesamte Teilnehmer:", total_participants, "\n")
## - Gesamte Teilnehmer: 120
# Count and percentage of participants with data for both tasks.
cat("- Vollständige Within-Subjects Daten (beide Tasks):", within_subjects_complete, 
    "(", round(100*within_subjects_complete/total_participants, 1), "%)\n")
## - Vollständige Within-Subjects Daten (beide Tasks): 120 ( 100 %)
# Count and percentage of participants assigned to exactly one communication mode.
cat("- Saubere Between-Subjects Zuordnung (eine Comm-Form):", between_subjects_clean,
    "(", round(100*between_subjects_clean/total_participants, 1), "%)\n")
## - Saubere Between-Subjects Zuordnung (eine Comm-Form): 120 ( 100 %)
if(within_subjects_complete == total_participants && between_subjects_clean == total_participants) {
  cat("✅ Perfektes Mixed-Design: Alle Teilnehmer haben beide Tasks und nur eine Kommunikationsform\n")
} else {
  cat("⚠️ Unvollständiges Mixed-Design: Prüfe Datenqualität\n")
}
## ✅ Perfektes Mixed-Design: Alle Teilnehmer haben beide Tasks und nur eine Kommunikationsform
print("\nMixed-Design Factorial Analyse abgeschlossen! 🎯")
## [1] "\nMixed-Design Factorial Analyse abgeschlossen! 🎯"

Regression of flow on familiarity scores

# --- 1. Correlation analyses for the familiarity variables -------------------

# Math Jitsi: two familiarity items (fam1, fam2) per teammate colour
# (lightcoral / lightgreen / lightblue); the second select() fixes column order
math_jitsi_fam <- data %>%
  select(starts_with("mathJitsi.6.player.fam")) %>%
  select(contains("lightcoral"), contains("lightgreen"), contains("lightblue"))

# Pairwise-complete correlations: each pair uses all rows where both items exist
cor_math_jitsi <- cor(math_jitsi_fam, use = "pairwise.complete.obs")
print("Correlation Matrix - Math Jitsi:")
## [1] "Correlation Matrix - Math Jitsi:"
print(round(cor_math_jitsi, 3))
##                                    mathJitsi.6.player.fam1_lightcoral
## mathJitsi.6.player.fam1_lightcoral                              1.000
## mathJitsi.6.player.fam2_lightcoral                              0.650
## mathJitsi.6.player.fam1_lightgreen                              0.621
## mathJitsi.6.player.fam2_lightgreen                              0.705
## mathJitsi.6.player.fam1_lightblue                               0.616
## mathJitsi.6.player.fam2_lightblue                               0.276
##                                    mathJitsi.6.player.fam2_lightcoral
## mathJitsi.6.player.fam1_lightcoral                              0.650
## mathJitsi.6.player.fam2_lightcoral                              1.000
## mathJitsi.6.player.fam1_lightgreen                              0.380
## mathJitsi.6.player.fam2_lightgreen                              0.792
## mathJitsi.6.player.fam1_lightblue                               0.094
## mathJitsi.6.player.fam2_lightblue                               0.589
##                                    mathJitsi.6.player.fam1_lightgreen
## mathJitsi.6.player.fam1_lightcoral                              0.621
## mathJitsi.6.player.fam2_lightcoral                              0.380
## mathJitsi.6.player.fam1_lightgreen                              1.000
## mathJitsi.6.player.fam2_lightgreen                              0.761
## mathJitsi.6.player.fam1_lightblue                               1.000
## mathJitsi.6.player.fam2_lightblue                               0.910
##                                    mathJitsi.6.player.fam2_lightgreen
## mathJitsi.6.player.fam1_lightcoral                              0.705
## mathJitsi.6.player.fam2_lightcoral                              0.792
## mathJitsi.6.player.fam1_lightgreen                              0.761
## mathJitsi.6.player.fam2_lightgreen                              1.000
## mathJitsi.6.player.fam1_lightblue                               0.909
## mathJitsi.6.player.fam2_lightblue                               0.996
##                                    mathJitsi.6.player.fam1_lightblue
## mathJitsi.6.player.fam1_lightcoral                             0.616
## mathJitsi.6.player.fam2_lightcoral                             0.094
## mathJitsi.6.player.fam1_lightgreen                             1.000
## mathJitsi.6.player.fam2_lightgreen                             0.909
## mathJitsi.6.player.fam1_lightblue                              1.000
## mathJitsi.6.player.fam2_lightblue                              0.599
##                                    mathJitsi.6.player.fam2_lightblue
## mathJitsi.6.player.fam1_lightcoral                             0.276
## mathJitsi.6.player.fam2_lightcoral                             0.589
## mathJitsi.6.player.fam1_lightgreen                             0.910
## mathJitsi.6.player.fam2_lightgreen                             0.996
## mathJitsi.6.player.fam1_lightblue                              0.599
## mathJitsi.6.player.fam2_lightblue                              1.000
# Math Chat: familiarity items (fam1/fam2 per teammate colour)
math_chat_fam <- data %>%
  select(starts_with("mathChat.6.player.fam")) %>%
  select(contains("lightcoral"), contains("lightgreen"), contains("lightblue"))

# Pairwise-complete correlations: each pair uses all rows where both items exist
cor_math_chat <- cor(math_chat_fam, use = "pairwise.complete.obs")
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\nCorrelation Matrix - Math Chat:\n")
## 
## Correlation Matrix - Math Chat:
print(round(cor_math_chat, 3))
##                                   mathChat.6.player.fam1_lightcoral
## mathChat.6.player.fam1_lightcoral                             1.000
## mathChat.6.player.fam2_lightcoral                             0.322
## mathChat.6.player.fam1_lightgreen                             0.921
## mathChat.6.player.fam2_lightgreen                             0.125
## mathChat.6.player.fam1_lightblue                              0.888
## mathChat.6.player.fam2_lightblue                              0.441
##                                   mathChat.6.player.fam2_lightcoral
## mathChat.6.player.fam1_lightcoral                             0.322
## mathChat.6.player.fam2_lightcoral                             1.000
## mathChat.6.player.fam1_lightgreen                            -0.082
## mathChat.6.player.fam2_lightgreen                             0.860
## mathChat.6.player.fam1_lightblue                              0.463
## mathChat.6.player.fam2_lightblue                              0.802
##                                   mathChat.6.player.fam1_lightgreen
## mathChat.6.player.fam1_lightcoral                             0.921
## mathChat.6.player.fam2_lightcoral                            -0.082
## mathChat.6.player.fam1_lightgreen                             1.000
## mathChat.6.player.fam2_lightgreen                             0.393
## mathChat.6.player.fam1_lightblue                              0.828
## mathChat.6.player.fam2_lightblue                              0.259
##                                   mathChat.6.player.fam2_lightgreen
## mathChat.6.player.fam1_lightcoral                             0.125
## mathChat.6.player.fam2_lightcoral                             0.860
## mathChat.6.player.fam1_lightgreen                             0.393
## mathChat.6.player.fam2_lightgreen                             1.000
## mathChat.6.player.fam1_lightblue                              0.479
## mathChat.6.player.fam2_lightblue                              0.822
##                                   mathChat.6.player.fam1_lightblue
## mathChat.6.player.fam1_lightcoral                            0.888
## mathChat.6.player.fam2_lightcoral                            0.463
## mathChat.6.player.fam1_lightgreen                            0.828
## mathChat.6.player.fam2_lightgreen                            0.479
## mathChat.6.player.fam1_lightblue                             1.000
## mathChat.6.player.fam2_lightblue                             0.438
##                                   mathChat.6.player.fam2_lightblue
## mathChat.6.player.fam1_lightcoral                            0.441
## mathChat.6.player.fam2_lightcoral                            0.802
## mathChat.6.player.fam1_lightgreen                            0.259
## mathChat.6.player.fam2_lightgreen                            0.822
## mathChat.6.player.fam1_lightblue                             0.438
## mathChat.6.player.fam2_lightblue                             1.000
# HP Jitsi: familiarity items (fam1/fam2 per teammate colour)
hp_jitsi_fam <- data %>%
  select(starts_with("HiddenProfile_Jitsi.3.player.fam")) %>%
  select(contains("lightcoral"), contains("lightgreen"), contains("lightblue"))

# Pairwise-complete correlations: each pair uses all rows where both items exist
cor_hp_jitsi <- cor(hp_jitsi_fam, use = "pairwise.complete.obs")
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\nCorrelation Matrix - HP Jitsi:\n")
## 
## Correlation Matrix - HP Jitsi:
print(round(cor_hp_jitsi, 3))
##                                              HiddenProfile_Jitsi.3.player.fam1_lightcoral
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                        1.000
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                        0.566
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                        0.622
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                        0.336
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                         0.697
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                         0.128
##                                              HiddenProfile_Jitsi.3.player.fam2_lightcoral
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                        0.566
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                        1.000
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                        0.056
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                        0.379
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                         0.245
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                         0.640
##                                              HiddenProfile_Jitsi.3.player.fam1_lightgreen
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                        0.622
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                        0.056
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                        1.000
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                        0.622
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                         0.995
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                         0.639
##                                              HiddenProfile_Jitsi.3.player.fam2_lightgreen
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                        0.336
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                        0.379
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                        0.622
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                        1.000
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                         0.686
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                         0.982
##                                              HiddenProfile_Jitsi.3.player.fam1_lightblue
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                       0.697
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                       0.245
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                       0.995
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                       0.686
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                        1.000
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                        0.572
##                                              HiddenProfile_Jitsi.3.player.fam2_lightblue
## HiddenProfile_Jitsi.3.player.fam1_lightcoral                                       0.128
## HiddenProfile_Jitsi.3.player.fam2_lightcoral                                       0.640
## HiddenProfile_Jitsi.3.player.fam1_lightgreen                                       0.639
## HiddenProfile_Jitsi.3.player.fam2_lightgreen                                       0.982
## HiddenProfile_Jitsi.3.player.fam1_lightblue                                        0.572
## HiddenProfile_Jitsi.3.player.fam2_lightblue                                        1.000
# HP Chat: familiarity items (fam1/fam2 per teammate colour)
hp_chat_fam <- data %>%
  select(starts_with("HiddenProfile_Chat.3.player.fam")) %>%
  select(contains("lightcoral"), contains("lightgreen"), contains("lightblue"))

# Pairwise-complete correlations: each pair uses all rows where both items exist
cor_hp_chat <- cor(hp_chat_fam, use = "pairwise.complete.obs")
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\nCorrelation Matrix - HP Chat:\n")
## 
## Correlation Matrix - HP Chat:
print(round(cor_hp_chat, 3))
##                                             HiddenProfile_Chat.3.player.fam1_lightcoral
## HiddenProfile_Chat.3.player.fam1_lightcoral                                       1.000
## HiddenProfile_Chat.3.player.fam2_lightcoral                                       0.280
## HiddenProfile_Chat.3.player.fam1_lightgreen                                       0.916
## HiddenProfile_Chat.3.player.fam2_lightgreen                                       0.307
## HiddenProfile_Chat.3.player.fam1_lightblue                                        0.907
## HiddenProfile_Chat.3.player.fam2_lightblue                                       -0.091
##                                             HiddenProfile_Chat.3.player.fam2_lightcoral
## HiddenProfile_Chat.3.player.fam1_lightcoral                                       0.280
## HiddenProfile_Chat.3.player.fam2_lightcoral                                       1.000
## HiddenProfile_Chat.3.player.fam1_lightgreen                                       0.306
## HiddenProfile_Chat.3.player.fam2_lightgreen                                       0.714
## HiddenProfile_Chat.3.player.fam1_lightblue                                       -0.022
## HiddenProfile_Chat.3.player.fam2_lightblue                                        0.530
##                                             HiddenProfile_Chat.3.player.fam1_lightgreen
## HiddenProfile_Chat.3.player.fam1_lightcoral                                       0.916
## HiddenProfile_Chat.3.player.fam2_lightcoral                                       0.306
## HiddenProfile_Chat.3.player.fam1_lightgreen                                       1.000
## HiddenProfile_Chat.3.player.fam2_lightgreen                                       0.338
## HiddenProfile_Chat.3.player.fam1_lightblue                                        0.713
## HiddenProfile_Chat.3.player.fam2_lightblue                                        0.258
##                                             HiddenProfile_Chat.3.player.fam2_lightgreen
## HiddenProfile_Chat.3.player.fam1_lightcoral                                       0.307
## HiddenProfile_Chat.3.player.fam2_lightcoral                                       0.714
## HiddenProfile_Chat.3.player.fam1_lightgreen                                       0.338
## HiddenProfile_Chat.3.player.fam2_lightgreen                                       1.000
## HiddenProfile_Chat.3.player.fam1_lightblue                                        0.419
## HiddenProfile_Chat.3.player.fam2_lightblue                                        0.883
##                                             HiddenProfile_Chat.3.player.fam1_lightblue
## HiddenProfile_Chat.3.player.fam1_lightcoral                                      0.907
## HiddenProfile_Chat.3.player.fam2_lightcoral                                     -0.022
## HiddenProfile_Chat.3.player.fam1_lightgreen                                      0.713
## HiddenProfile_Chat.3.player.fam2_lightgreen                                      0.419
## HiddenProfile_Chat.3.player.fam1_lightblue                                       1.000
## HiddenProfile_Chat.3.player.fam2_lightblue                                       0.252
##                                             HiddenProfile_Chat.3.player.fam2_lightblue
## HiddenProfile_Chat.3.player.fam1_lightcoral                                     -0.091
## HiddenProfile_Chat.3.player.fam2_lightcoral                                      0.530
## HiddenProfile_Chat.3.player.fam1_lightgreen                                      0.258
## HiddenProfile_Chat.3.player.fam2_lightgreen                                      0.883
## HiddenProfile_Chat.3.player.fam1_lightblue                                       0.252
## HiddenProfile_Chat.3.player.fam2_lightblue                                       1.000
# --- 2. Familiarity: average the two items (fam1 & fam2) per teammate colour -

# pick() (dplyr >= 1.1) selects columns of the current data frame inside
# mutate(), equivalent to select(., ...) with the magrittr dot.
# rowMeans(..., na.rm = TRUE) keeps the remaining item when one is missing
# (an all-missing pair yields NaN, as before).
data <- data %>%
  mutate(
    # Math – Jitsi
    fam_mathJitsi_coral = rowMeans(pick(mathJitsi.6.player.fam1_lightcoral, mathJitsi.6.player.fam2_lightcoral), na.rm = TRUE),
    fam_mathJitsi_green = rowMeans(pick(mathJitsi.6.player.fam1_lightgreen, mathJitsi.6.player.fam2_lightgreen), na.rm = TRUE),
    fam_mathJitsi_blue  = rowMeans(pick(mathJitsi.6.player.fam1_lightblue,  mathJitsi.6.player.fam2_lightblue),  na.rm = TRUE),

    # Math – Chat
    fam_mathChat_coral = rowMeans(pick(mathChat.6.player.fam1_lightcoral, mathChat.6.player.fam2_lightcoral), na.rm = TRUE),
    fam_mathChat_green = rowMeans(pick(mathChat.6.player.fam1_lightgreen, mathChat.6.player.fam2_lightgreen), na.rm = TRUE),
    fam_mathChat_blue  = rowMeans(pick(mathChat.6.player.fam1_lightblue,  mathChat.6.player.fam2_lightblue),  na.rm = TRUE),

    # HP – Jitsi
    fam_hpJitsi_coral = rowMeans(pick(HiddenProfile_Jitsi.3.player.fam1_lightcoral, HiddenProfile_Jitsi.3.player.fam2_lightcoral), na.rm = TRUE),
    fam_hpJitsi_green = rowMeans(pick(HiddenProfile_Jitsi.3.player.fam1_lightgreen, HiddenProfile_Jitsi.3.player.fam2_lightgreen), na.rm = TRUE),
    fam_hpJitsi_blue  = rowMeans(pick(HiddenProfile_Jitsi.3.player.fam1_lightblue,  HiddenProfile_Jitsi.3.player.fam2_lightblue),  na.rm = TRUE),

    # HP – Chat
    fam_hpChat_coral = rowMeans(pick(HiddenProfile_Chat.3.player.fam1_lightcoral, HiddenProfile_Chat.3.player.fam2_lightcoral), na.rm = TRUE),
    fam_hpChat_green = rowMeans(pick(HiddenProfile_Chat.3.player.fam1_lightgreen, HiddenProfile_Chat.3.player.fam2_lightgreen), na.rm = TRUE),
    fam_hpChat_blue  = rowMeans(pick(HiddenProfile_Chat.3.player.fam1_lightblue,  HiddenProfile_Chat.3.player.fam2_lightblue),  na.rm = TRUE)
  )

# --- 3. Familiarity: aggregate per condition (over the two populated colours) -

# Each participant has values for two of the three colour columns; rowMeans
# with na.rm = TRUE averages whichever are present. pick() replaces the
# select(., ...) idiom and is behaviourally identical here.
data <- data %>%
  mutate(
    fam_mathJitsi = rowMeans(pick(fam_mathJitsi_coral, fam_mathJitsi_green, fam_mathJitsi_blue), na.rm = TRUE),
    fam_mathChat  = rowMeans(pick(fam_mathChat_coral, fam_mathChat_green, fam_mathChat_blue), na.rm = TRUE),
    fam_hpJitsi   = rowMeans(pick(fam_hpJitsi_coral, fam_hpJitsi_green, fam_hpJitsi_blue), na.rm = TRUE),
    fam_hpChat    = rowMeans(pick(fam_hpChat_coral, fam_hpChat_green, fam_hpChat_blue), na.rm = TRUE)
  )

# --- 4. Recognition: categorisation for analysis ---------------

data <- data %>%
  mutate(
    # Individual recognition scores (one per teammate colour, from the outro)
    rec_coral = Outro.1.player.rec_lightcoral,
    rec_green = Outro.1.player.rec_lightgreen,
    rec_blue  = Outro.1.player.rec_lightblue
  ) %>%
  mutate(
    # Mean recognition across teammates; NA items drop out via na.rm
    rec_mean = rowMeans(select(., rec_coral, rec_green, rec_blue), na.rm = TRUE),
    
    # Count how many teammates were known (>4 on 7-point scale)
    # assumes one of the three colour columns is the participant's own (NA),
    # so the count maxes out at 2 — TODO confirm against the data collection
    rec_count = rowSums(select(., rec_coral, rec_green, rec_blue) > 4, na.rm = TRUE),
    
    # Categorical variable
    # NOTE(review): a rec_count of 3 would silently map to NA here; the
    # category table later in the file shows only 0–2 occur
    rec_category = case_when(
      rec_count == 0 ~ "Nobody known",
      rec_count == 1 ~ "One person known",
      rec_count == 2 ~ "Both known",
      TRUE ~ NA_character_
    )
  )

# --- 5. Descriptive statistics ---------------

# Recognition categories
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Recognition Categories ---\n")
## 
## --- Recognition Categories ---
table(data$rec_category)
## 
##       Both known     Nobody known One person known 
##               10               93               17
# Mean familiarity per task × communication condition
cat("\n--- Mean Familiarity by Condition ---\n")
## 
## --- Mean Familiarity by Condition ---
data %>%
  summarise(
    MathJitsi = mean(fam_mathJitsi, na.rm = TRUE),
    MathChat = mean(fam_mathChat, na.rm = TRUE),
    HPJitsi = mean(fam_hpJitsi, na.rm = TRUE),
    HPChat = mean(fam_hpChat, na.rm = TRUE)
  ) %>%
  print()
##   MathJitsi MathChat HPJitsi   HPChat
## 1  4.408333 3.379167   4.675 3.908333
# --- 6. Long format for regression analyses ---------------

# The flow scores live in a separate data frame (flow_clean), so we first
# aggregate them per participant × task × communication medium,
# averaging over the difficulty levels.

# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Aggregating flow scores ---\n")
## 
## --- Aggregating flow scores ---
flow_aggregated <- flow_clean %>%
  group_by(participant.code, task, comm) %>%
  summarise(
    flow_score = mean(flow_score, na.rm = TRUE),
    n_difficulties = n(),  # number of difficulty levels averaged over
    .groups = 'drop'
  )

print("Sample of aggregated flow scores:")
## [1] "Sample of aggregated flow scores:"
print(head(flow_aggregated))
## # A tibble: 6 × 5
##   participant.code task  comm  flow_score n_difficulties
##   <chr>            <chr> <chr>      <dbl>          <int>
## 1 041p1uqk         HP    Jitsi       5.04              3
## 2 041p1uqk         Math  Jitsi       4.97              4
## 3 08j3vzzw         HP    Jitsi       5.22              3
## 4 08j3vzzw         Math  Jitsi       5.28              4
## 5 0fakmdd1         HP    Jitsi       6.30              3
## 6 0fakmdd1         Math  Jitsi       6.67              4
# Build the long format for familiarity: one row per participant × condition
familiarity_long <- data %>%
  select(participant.code,
         fam_mathJitsi, fam_mathChat, fam_hpJitsi, fam_hpChat,
         rec_mean, rec_count, rec_category) %>%
  pivot_longer(
    cols = starts_with("fam_"),
    names_to = "condition",
    values_to = "familiarity"
  ) %>%
  mutate(
    # The condition name encodes both factors: "math…"/"hp…" is the task
    # ("HP" matches the coding in flow_clean), "…Chat"/"…Jitsi" is the medium.
    task = if_else(str_detect(condition, "math"), "Math", "HP"),
    comm = if_else(str_detect(condition, "Chat"), "Chat", "Jitsi")
  )

# Merge the aggregated flow scores into the long familiarity table;
# rows without a matching flow score are dropped.
familiarity_long <- familiarity_long %>%
  left_join(
    flow_aggregated %>% select(participant.code, task, comm, flow_score),
    by = c("participant.code", "task", "comm")
  ) %>%
  drop_na(flow_score)

# Sanity checks on the merged data
# Fix: print(paste("\n...")) shows the "\n" escape literally; cat() renders it
cat("\nRows in familiarity_long:", nrow(familiarity_long), "\n")
## 
## Rows in familiarity_long: 240
print(paste("Unique participants:", n_distinct(familiarity_long$participant.code)))
## [1] "Unique participants: 120"
# Check the distribution across the 2×2 design
cat("\nDistribution by condition:\n")
## 
## Distribution by condition:
print(table(familiarity_long$task, familiarity_long$comm))
##       
##        Chat Jitsi
##   HP     60    60
##   Math   60    60
# --- 7. Regression Models ---------------

# Model A: flow regressed on familiarity and mean prior recognition only
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Model A: Basic (Familiarity + Recognition) ---\n")
## 
## --- Model A: Basic (Familiarity + Recognition) ---
model_a <- lm(flow_score ~ familiarity + rec_mean, data = familiarity_long)
summary(model_a)
## 
## Call:
## lm(formula = flow_score ~ familiarity + rec_mean, data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.38256 -0.54413  0.04534  0.63918  1.76653 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.90542    0.15524  31.600   <2e-16 ***
## familiarity  0.10232    0.04046   2.529   0.0121 *  
## rec_mean     0.02109    0.03647   0.578   0.5636    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8536 on 237 degrees of freedom
## Multiple R-squared:  0.04297,    Adjusted R-squared:  0.03489 
## F-statistic: 5.321 on 2 and 237 DF,  p-value: 0.005492
print("\n--- Model B: With Task and Communication ---")
## [1] "\n--- Model B: With Task and Communication ---"
model_b <- lm(flow_score ~ familiarity + rec_mean + task + comm, data = familiarity_long)
summary(model_b)
## 
## Call:
## lm(formula = flow_score ~ familiarity + rec_mean + task + comm, 
##     data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.34142 -0.51506  0.00657  0.62110  1.82027 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  4.83411    0.17355  27.854  < 2e-16 ***
## familiarity  0.10862    0.04128   2.631  0.00907 ** 
## rec_mean     0.01975    0.03854   0.513  0.60872    
## taskMath     0.10850    0.11166   0.972  0.33222    
## commJitsi   -0.01220    0.12141  -0.101  0.92001    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8555 on 235 degrees of freedom
## Multiple R-squared:  0.04683,    Adjusted R-squared:  0.0306 
## F-statistic: 2.886 on 4 and 235 DF,  p-value: 0.02321
print("\n--- Model C: With Interactions ---")
## [1] "\n--- Model C: With Interactions ---"
model_c <- lm(flow_score ~ familiarity * comm + rec_mean * comm + task, data = familiarity_long)
summary(model_c)
## 
## Call:
## lm(formula = flow_score ~ familiarity * comm + rec_mean * comm + 
##     task, data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.36589 -0.55483  0.00687  0.61510  1.77505 
## 
## Coefficients:
##                        Estimate Std. Error t value Pr(>|t|)    
## (Intercept)            5.044482   0.258326  19.528   <2e-16 ***
## familiarity            0.098247   0.061046   1.609   0.1089    
## commJitsi             -0.278195   0.334101  -0.833   0.4059    
## rec_mean              -0.114269   0.085694  -1.333   0.1837    
## taskMath               0.105118   0.111619   0.942   0.3473    
## familiarity:commJitsi  0.005593   0.082298   0.068   0.9459    
## commJitsi:rec_mean     0.163750   0.096860   1.691   0.0923 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8535 on 233 degrees of freedom
## Multiple R-squared:  0.05951,    Adjusted R-squared:  0.03529 
## F-statistic: 2.457 on 6 and 233 DF,  p-value: 0.0253
print("\n--- Model D: Recognition Categories ---")
## [1] "\n--- Model D: Recognition Categories ---"
model_d <- lm(flow_score ~ familiarity + rec_category + task + comm, data = familiarity_long)
summary(model_d)
## 
## Call:
## lm(formula = flow_score ~ familiarity + rec_category + task + 
##     comm, data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.34303 -0.55619 -0.00725  0.65003  1.81438 
## 
## Coefficients:
##                              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                   5.05905    0.34786  14.543   <2e-16 ***
## familiarity                   0.10648    0.04191   2.541   0.0117 *  
## rec_categoryNobody known     -0.19287    0.23560  -0.819   0.4138    
## rec_categoryOne person known -0.30514    0.24888  -1.226   0.2214    
## taskMath                      0.10765    0.11164   0.964   0.3359    
## commJitsi                     0.01940    0.12499   0.155   0.8768    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8551 on 234 degrees of freedom
## Multiple R-squared:  0.0519, Adjusted R-squared:  0.03164 
## F-statistic: 2.562 on 5 and 234 DF,  p-value: 0.02798
print("\n--- Model E: Non-linear Recognition Effect ---")
## [1] "\n--- Model E: Non-linear Recognition Effect ---"
model_e <- lm(flow_score ~ familiarity + rec_count + I(rec_count^2) + task + comm, data = familiarity_long)
summary(model_e)
## 
## Call:
## lm(formula = flow_score ~ familiarity + rec_count + I(rec_count^2) + 
##     task + comm, data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.34303 -0.55619 -0.00725  0.65003  1.81438 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     4.86618    0.18528  26.264   <2e-16 ***
## familiarity     0.10648    0.04191   2.541   0.0117 *  
## rec_count      -0.32099    0.34152  -0.940   0.3483    
## I(rec_count^2)  0.20871    0.18207   1.146   0.2528    
## taskMath        0.10765    0.11164   0.964   0.3359    
## commJitsi       0.01940    0.12499   0.155   0.8768    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8551 on 234 degrees of freedom
## Multiple R-squared:  0.0519, Adjusted R-squared:  0.03164 
## F-statistic: 2.562 on 5 and 234 DF,  p-value: 0.02798
# --- 8. Visualizations ---------------

# Plot 1: familiarity vs. flow, coloured by medium, one panel per task
p1 <- familiarity_long %>%
  ggplot(aes(x = familiarity, y = flow_score, color = comm)) +
  geom_point(alpha = 0.6) +
  geom_smooth(method = "lm", se = TRUE) +
  facet_wrap(~ task) +
  theme_minimal() +
  labs(
    title = "Familiarity vs. Flow by Communication Type and Task",
    x = "Familiarity with Teammates (during task)",
    y = "Flow Score",
    color = "Communication"
  )

print(p1)
## `geom_smooth()` using formula = 'y ~ x'

# Plot 2: flow distribution per recognition category, faceted task × medium
p2 <- familiarity_long %>%
  ggplot(aes(x = rec_category, y = flow_score, fill = rec_category)) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.3) +
  facet_grid(task ~ comm) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(
    title = "Flow by Prior Recognition of Teammates",
    x = "Prior Recognition",
    y = "Flow Score"
  )

print(p2)

# Plot 3: mean prior recognition vs. flow with a linear fit per panel
p3 <- familiarity_long %>%
  ggplot(aes(x = rec_mean, y = flow_score)) +
  geom_point(alpha = 0.5) +
  geom_smooth(method = "lm", se = TRUE) +
  facet_grid(task ~ comm) +
  theme_minimal() +
  labs(
    title = "Mean Prior Recognition vs. Flow",
    x = "Mean Recognition Score",
    y = "Flow Score"
  )

print(p3)
## `geom_smooth()` using formula = 'y ~ x'

# --- 9. Additional Analyses ---------------

# Does the recognition effect differ by communication medium?
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Model F: Recognition × Communication Interaction ---\n")
## 
## --- Model F: Recognition × Communication Interaction ---
model_f <- lm(flow_score ~ familiarity + rec_mean * comm + task, data = familiarity_long)
summary(model_f)
## 
## Call:
## lm(formula = flow_score ~ familiarity + rec_mean * comm + task, 
##     data = familiarity_long)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.36997 -0.55582  0.00658  0.61588  1.77707 
## 
## Coefficients:
##                    Estimate Std. Error t value Pr(>|t|)    
## (Intercept)         5.03395    0.20620  24.413   <2e-16 ***
## familiarity         0.10130    0.04130   2.453   0.0149 *  
## rec_mean           -0.11490    0.08500  -1.352   0.1777    
## commJitsi          -0.25927    0.18432  -1.407   0.1609    
## taskMath            0.10559    0.11117   0.950   0.3432    
## rec_mean:commJitsi  0.16549    0.09322   1.775   0.0772 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8517 on 234 degrees of freedom
## Multiple R-squared:  0.05949,    Adjusted R-squared:  0.0394 
## F-statistic:  2.96 on 5 and 234 DF,  p-value: 0.01302
# Correlation between familiarity and prior recognition
# NOTE(review): 240 rows come from 120 participants × 2 conditions, so these
# observations are not independent — the p-value is optimistic; confirm.
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Correlation: Familiarity vs Recognition ---\n")
## 
## --- Correlation: Familiarity vs Recognition ---
cor.test(familiarity_long$familiarity, familiarity_long$rec_mean)
## 
##  Pearson's product-moment correlation
## 
## data:  familiarity_long$familiarity and familiarity_long$rec_mean
## t = 8.5371, df = 238, p-value = 1.645e-15
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.3809132 0.5755309
## sample estimates:
##       cor 
## 0.4841883
# Mean flow by recognition category
# Fix: print() shows the "\n" escape literally; cat() emits a real newline
cat("\n--- Mean Flow by Recognition Category ---\n")
## 
## --- Mean Flow by Recognition Category ---
familiarity_long %>%
  group_by(rec_category) %>%
  summarise(
    mean_flow = mean(flow_score, na.rm = TRUE),
    sd_flow = sd(flow_score, na.rm = TRUE),
    n = n()
  ) %>%
  print()
## # A tibble: 3 × 4
##   rec_category     mean_flow sd_flow     n
##   <chr>                <dbl>   <dbl> <int>
## 1 Both known            5.81   0.693    20
## 2 Nobody known          5.32   0.868   186
## 3 One person known      5.35   0.916    34

Regression of flow on gender distribution

# Simplified gender composition analysis
# ================================================================================
# PART 1: ADD GENDER COMPOSITION TO THE FLOW SCORES
# ================================================================================

# Compute each team's gender composition.
# NOTE(review): members with missing gender are filtered out first, so
# n_members counts only members with a known gender — a team with one
# missing entry could still be labelled "all_male"; confirm this is intended.
team_gender_composition <- data %>%
  dplyr::select(participant.code, team_id, gender = Intro.1.player.gender) %>%
  filter(!is.na(gender), !is.na(team_id)) %>%
  group_by(team_id) %>%
  dplyr::summarise(
    n_members = n(),
    n_male = sum(gender == "Male", na.rm = TRUE),
    n_female = sum(gender == "Female", na.rm = TRUE),
    n_other = sum(!gender %in% c("Male", "Female"), na.rm = TRUE),
    unique_genders = n_distinct(gender),
    .groups = "drop"
  ) %>%
  mutate(
    # Categorical composition label per team
    gender_comp = case_when(
      n_male == n_members ~ "all_male",
      n_female == n_members ~ "all_female",
      n_male > 0 & n_female > 0 ~ "mixed",
      TRUE ~ "other"
    )
  )

print("Team Gender Composition erstellt:")
## [1] "Team Gender Composition erstellt:"
print(table(team_gender_composition$gender_comp))
## 
## all_male    mixed 
##       13       27
# Attach the team-level gender composition to flow_clean (skip the join
# when an earlier chunk already added a gender_comp column).
if(!"gender_comp" %in% names(flow_clean)) {
  flow_scores_gender <- flow_clean %>%
    left_join(team_gender_composition %>% dplyr::select(team_id, gender_comp), 
              by = "team_id")
} else {
  flow_scores_gender <- flow_clean
}

# Überprüfe Verteilung
print("Team Gender Composition Verteilung:")
## [1] "Team Gender Composition Verteilung:"
print(table(flow_scores_gender$gender_comp, useNA = "ifany"))
## 
## all_male    mixed 
##      268      537
# TEIL 2: DESKRIPTIVE STATISTIKEN
# ================================================================================

print("\n--- DESKRIPTIVE STATISTIKEN ---")
## [1] "\n--- DESKRIPTIVE STATISTIKEN ---"
# Overview: observation/participant/team counts and flow descriptives per
# gender composition (NA flow values excluded from the statistics).
gender_descriptives <- flow_scores_gender %>%
  group_by(gender_comp) %>%
  dplyr::summarise(
    n_observations = n(),
    n_participants = n_distinct(participant.code),
    n_teams = n_distinct(team_id),
    flow_mean = round(mean(flow_score, na.rm = TRUE), 3),
    flow_sd = round(sd(flow_score, na.rm = TRUE), 3),
    flow_min = round(min(flow_score, na.rm = TRUE), 3),
    flow_max = round(max(flow_score, na.rm = TRUE), 3),
    .groups = "drop"
  )

print("Übersicht nach Gender Composition:")
## [1] "Übersicht nach Gender Composition:"
print(gender_descriptives)
## # A tibble: 2 × 8
##   gender_comp n_observations n_participants n_teams flow_mean flow_sd flow_min
##   <chr>                <int>          <int>   <int>     <dbl>   <dbl>    <dbl>
## 1 all_male               268             39      13      5.54   0.978        3
## 2 mixed                  537             81      27      5.32   1.04         3
## # ℹ 1 more variable: flow_max <dbl>
# Flow descriptives split by gender composition and task.
gender_task_descriptives <- flow_scores_gender %>%
  group_by(gender_comp, task) %>%
  dplyr::summarise(
    n = n(),
    flow_mean = round(mean(flow_score, na.rm = TRUE), 3),
    flow_sd = round(sd(flow_score, na.rm = TRUE), 3),
    .groups = "drop"
  )

print("\nFlow nach Gender Composition und Task:")
## [1] "\nFlow nach Gender Composition und Task:"
print(gender_task_descriptives)
## # A tibble: 4 × 5
##   gender_comp task      n flow_mean flow_sd
##   <chr>       <chr> <int>     <dbl>   <dbl>
## 1 all_male    HP      115      5.46    1.04
## 2 all_male    Math    153      5.60    0.93
## 3 mixed       HP      230      5.31    1.05
## 4 mixed       Math    307      5.32    1.04
# Flow descriptives split by gender composition and communication channel.
gender_comm_descriptives <- flow_scores_gender %>%
  group_by(gender_comp, comm) %>%
  dplyr::summarise(
    n = n(),
    flow_mean = round(mean(flow_score, na.rm = TRUE), 3),
    flow_sd = round(sd(flow_score, na.rm = TRUE), 3),
    .groups = "drop"
  )

print("\nFlow nach Gender Composition und Communication:")
## [1] "\nFlow nach Gender Composition und Communication:"
print(gender_comm_descriptives)
## # A tibble: 4 × 5
##   gender_comp comm      n flow_mean flow_sd
##   <chr>       <chr> <int>     <dbl>   <dbl>
## 1 all_male    Chat    144      5.37   0.996
## 2 all_male    Jitsi   124      5.75   0.92 
## 3 mixed       Chat    260      5.32   1.06 
## 4 mixed       Jitsi   277      5.32   1.03
# TEIL 3: STATISTISCHE TESTS
# ================================================================================

print("\n--- STATISTISCHE TESTS ---")
## [1] "\n--- STATISTISCHE TESTS ---"
# One-way between-groups ANOVA: main effect of gender composition on flow.
print("ANOVA: Gender Composition Haupteffekt")
## [1] "ANOVA: Gender Composition Haupteffekt"
anova_gender <- aov(flow_score ~ gender_comp, data = flow_scores_gender)
print(summary(anova_gender))
##              Df Sum Sq Mean Sq F value  Pr(>F)   
## gender_comp   1    9.0   8.975   8.589 0.00348 **
## Residuals   803  839.1   1.045                   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Post-hoc test only when the omnibus ANOVA is significant.
# Pull the gender_comp p-value out of the summary table (first row of
# the "Pr(>F)" column).
anova_p <- summary(anova_gender)[[1]][["Pr(>F)"]][1]
if(!is.na(anova_p) && anova_p < 0.05) {
  # NOTE(review): print() shows "\n" literally; cat() would render it.
  print("\nPost-hoc Test (Tukey HSD):")
  print(TukeyHSD(anova_gender))
}
## [1] "\nPost-hoc Test (Tukey HSD):"
##   Tukey multiple comparisons of means
##     95% family-wise confidence level
## 
## Fit: aov(formula = flow_score ~ gender_comp, data = flow_scores_gender)
## 
## $gender_comp
##                      diff        lwr         upr     p adj
## mixed-all_male -0.2240613 -0.3741359 -0.07398658 0.0034786
# Welch two-sample t-test: all_male vs. mixed (only if both levels occur).
if("all_male" %in% flow_scores_gender$gender_comp && "mixed" %in% flow_scores_gender$gender_comp) {
  print("\nT-Test: all_male vs. mixed")
  t_test_result <- t.test(
    flow_score ~ gender_comp, 
    data = flow_scores_gender %>% filter(gender_comp %in% c("all_male", "mixed"))
  )
  print(t_test_result)
}
## [1] "\nT-Test: all_male vs. mixed"
## 
##  Welch Two Sample t-test
## 
## data:  flow_score by gender_comp
## t = 2.9944, df = 565.42, p-value = 0.00287
## alternative hypothesis: true difference in means between group all_male and group mixed is not equal to 0
## 95 percent confidence interval:
##  0.07708914 0.37103338
## sample estimates:
## mean in group all_male    mean in group mixed 
##               5.543118               5.319056
# TEIL 4: LINEARE MODELLE
# ================================================================================

print("\n--- LINEARE MODELLE ---")
## [1] "\n--- LINEARE MODELLE ---"
# Basis-Modell
print("Model 1: Nur Gender Composition")
## [1] "Model 1: Nur Gender Composition"
# Model 1: OLS of flow on gender composition only (all_male = reference).
model1 <- lm(flow_score ~ gender_comp, data = flow_scores_gender)
print(summary(model1))
## 
## Call:
## lm(formula = flow_score ~ gender_comp, data = flow_scores_gender)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.5431 -0.7635  0.1254  0.7921  1.6809 
## 
## Coefficients:
##                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)       5.54312    0.06244  88.769  < 2e-16 ***
## gender_compmixed -0.22406    0.07645  -2.931  0.00348 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.022 on 803 degrees of freedom
## Multiple R-squared:  0.01058,    Adjusted R-squared:  0.00935 
## F-statistic: 8.589 on 1 and 803 DF,  p-value: 0.003479
# Mit Task
print("\nModel 2: Gender Composition + Task")
## [1] "\nModel 2: Gender Composition + Task"
# Model 2: adds task as a covariate (HP = reference level).
model2 <- lm(flow_score ~ gender_comp + task, data = flow_scores_gender)
print(summary(model2))
## 
## Call:
## lm(formula = flow_score ~ gender_comp + task, data = flow_scores_gender)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.5667 -0.7871  0.1018  0.8216  1.7124 
## 
## Coefficients:
##                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)       5.51172    0.07503  73.457  < 2e-16 ***
## gender_compmixed -0.22411    0.07648  -2.930  0.00348 ** 
## taskMath          0.05499    0.07283   0.755  0.45039    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.023 on 802 degrees of freedom
## Multiple R-squared:  0.01129,    Adjusted R-squared:  0.00882 
## F-statistic: 4.577 on 2 and 802 DF,  p-value: 0.01055
# Mit Communication
print("\nModel 3: Gender Composition + Communication")
## [1] "\nModel 3: Gender Composition + Communication"
# Model 3: adds communication channel (Chat = reference level).
model3 <- lm(flow_score ~ gender_comp + comm, data = flow_scores_gender)
print(summary(model3))
## 
## Call:
## lm(formula = flow_score ~ gender_comp + comm, data = flow_scores_gender)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.48563 -0.81052  0.07837  0.83457  1.74503 
## 
## Coefficients:
##                  Estimate Std. Error t value Pr(>|t|)    
## (Intercept)       5.48563    0.07072  77.567  < 2e-16 ***
## gender_compmixed -0.23066    0.07646  -3.017  0.00263 ** 
## commJitsi         0.12425    0.07206   1.724  0.08507 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.021 on 802 degrees of freedom
## Multiple R-squared:  0.01424,    Adjusted R-squared:  0.01178 
## F-statistic: 5.791 on 2 and 802 DF,  p-value: 0.003183
# Mit Difficulty
print("\nModel 4: Gender Composition + Difficulty")
## [1] "\nModel 4: Gender Composition + Difficulty"
# Model 4: adds task difficulty (reference = first difficulty level).
model4 <- lm(flow_score ~ gender_comp + difficulty, data = flow_scores_gender)
print(summary(model4))
## 
## Call:
## lm(formula = flow_score ~ gender_comp + difficulty, data = flow_scores_gender)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.46752 -0.70524  0.07867  0.75092  2.07254 
## 
## Coefficients:
##                              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                   5.62831    0.08188  68.735  < 2e-16 ***
## gender_compmixed             -0.23158    0.07432  -3.116   0.0019 ** 
## difficultyHard               -0.46927    0.09353  -5.017 6.47e-07 ***
## difficultyMedium             -0.03654    0.11315  -0.323   0.7468    
## difficultyOptimal_Calibrated  0.17070    0.11187   1.526   0.1274    
## difficultyOptimal_Selected    0.18190    0.11187   1.626   0.1043    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9936 on 799 degrees of freedom
## Multiple R-squared:  0.06998,    Adjusted R-squared:  0.06416 
## F-statistic: 12.02 on 5 and 799 DF,  p-value: 3.031e-11
# Volles Modell
print("\nModel 5: Vollmodell")
## [1] "\nModel 5: Vollmodell"
# Model 5: full additive model with all experimental factors.
model5 <- lm(flow_score ~ gender_comp + task + comm + difficulty, data = flow_scores_gender)
print(summary(model5))
## 
## Call:
## lm(formula = flow_score ~ gender_comp + task + comm + difficulty, 
##     data = flow_scores_gender)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.53677 -0.75048  0.09463  0.78605  2.21962 
## 
## Coefficients:
##                              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                   5.65033    0.09941  56.836  < 2e-16 ***
## gender_compmixed             -0.23855    0.07422  -3.214  0.00136 ** 
## taskMath                     -0.15998    0.09325  -1.716  0.08661 .  
## commJitsi                     0.12499    0.06995   1.787  0.07433 .  
## difficultyHard               -0.47142    0.09329  -5.053 5.39e-07 ***
## difficultyMedium             -0.11693    0.12195  -0.959  0.33794    
## difficultyOptimal_Calibrated  0.25137    0.12108   2.076  0.03821 *  
## difficultyOptimal_Selected    0.26258    0.12108   2.169  0.03041 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.991 on 797 degrees of freedom
## Multiple R-squared:  0.07716,    Adjusted R-squared:  0.06905 
## F-statistic:  9.52 on 7 and 797 DF,  p-value: 2.179e-11
# Mit Interaktionen
print("\nModel 6: Mit Gender × Task Interaktion")
## [1] "\nModel 6: Mit Gender × Task Interaktion"
# Model 6: like model 5 but with a gender_comp × task interaction.
model6 <- lm(flow_score ~ gender_comp * task + comm + difficulty, data = flow_scores_gender)
print(summary(model6))
## 
## Call:
## lm(formula = flow_score ~ gender_comp * task + comm + difficulty, 
##     data = flow_scores_gender)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.56343 -0.73102  0.09602  0.76818  2.23988 
## 
## Coefficients:
##                              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)                   5.59778    0.11450  48.890  < 2e-16 ***
## gender_compmixed             -0.15939    0.11325  -1.407   0.1597    
## taskMath                     -0.06795    0.13632  -0.498   0.6183    
## commJitsi                     0.12504    0.06995   1.788   0.0742 .  
## difficultyHard               -0.47170    0.09330  -5.056 5.33e-07 ***
## difficultyMedium             -0.11741    0.12197  -0.963   0.3360    
## difficultyOptimal_Calibrated  0.25184    0.12109   2.080   0.0379 *  
## difficultyOptimal_Selected    0.26305    0.12109   2.172   0.0301 *  
## gender_compmixed:taskMath    -0.13862    0.14977  -0.926   0.3550    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9911 on 796 degrees of freedom
## Multiple R-squared:  0.07815,    Adjusted R-squared:  0.06889 
## F-statistic: 8.435 on 8 and 796 DF,  p-value: 4.789e-11
# TEIL 5: VISUALISIERUNGEN
# ================================================================================

print("\n--- VISUALISIERUNGEN ---")
## [1] "\n--- VISUALISIERUNGEN ---"
# Boxplot of flow by gender composition, with jittered raw observations.
p1 <- ggplot(flow_scores_gender, aes(x = gender_comp, y = flow_score, fill = gender_comp)) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(width = 0.2, alpha = 0.3, size = 0.5) +
  labs(
    title = "Flow Scores by Team Gender Composition",
    x = "Team Gender Composition",
    y = "Flow Score",
    fill = "Gender Composition"
  ) +
  theme_minimal() +
  scale_fill_brewer(palette = "Set2") +
  theme(legend.position = "none")

print(p1)

# Same comparison, faceted by task.
p2 <- ggplot(flow_scores_gender, aes(x = gender_comp, y = flow_score, fill = gender_comp)) +
  geom_boxplot(alpha = 0.7) +
  facet_wrap(~ task) +
  labs(
    title = "Flow Scores by Gender Composition and Task",
    x = "Team Gender Composition",
    y = "Flow Score",
    fill = "Gender Composition"
  ) +
  theme_minimal() +
  scale_fill_brewer(palette = "Set2") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

print(p2)

# Same comparison, faceted by communication channel.
p3 <- ggplot(flow_scores_gender, aes(x = gender_comp, y = flow_score, fill = gender_comp)) +
  geom_boxplot(alpha = 0.7) +
  facet_wrap(~ comm) +
  labs(
    title = "Flow Scores by Gender Composition and Communication",
    x = "Team Gender Composition",
    y = "Flow Score",
    fill = "Gender Composition"
  ) +
  theme_minimal() +
  scale_fill_brewer(palette = "Set2") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

print(p3)

# Interaction plot: estimated marginal means of gender composition within
# each task, from the interaction model (model6). emmeans is optional, so
# guard on availability. requireNamespace() + `::` avoids attaching the
# package — require() attaches it and can mask names (see the effsize
# masking warning recorded later in this script).
if(requireNamespace("emmeans", quietly = TRUE)) {
  # NOTE(review): print() renders "\n" literally; cat() would honour it.
  print("\n--- INTERACTION PLOTS ---")
  
  # Gender × Task
  emmeans_gender_task <- emmeans::emmeans(model6, ~ gender_comp | task)
  p4 <- plot(emmeans_gender_task) +
    labs(title = "Estimated Marginal Means: Gender Composition by Task") +
    theme_minimal()
  print(p4)
}
## [1] "\n--- INTERACTION PLOTS ---"

# TEIL 6: EFFEKTSTÄRKEN
# ================================================================================

print("\n--- EFFEKTSTÄRKEN ---")
## [1] "\n--- EFFEKTSTÄRKEN ---"
# Eta-squared effect size for the gender-composition ANOVA. effectsize is
# optional, so guard on availability; requireNamespace() + `::` avoids
# attaching the package (require() attaches and can mask names).
if(requireNamespace("effectsize", quietly = TRUE)) {
  eta_gender <- effectsize::eta_squared(anova_gender)
  print("Eta-squared für Gender Composition:")
  print(eta_gender)
}
## For one-way between subjects designs, partial eta squared is equivalent
##   to eta squared. Returning eta squared.
## [1] "Eta-squared für Gender Composition:"
## # Effect Size for ANOVA
## 
## Parameter   | Eta2 |       95% CI
## ---------------------------------
## gender_comp | 0.01 | [0.00, 1.00]
## 
## - One-sided CIs: upper bound fixed at [1.00].
# Cohen's d for the all_male vs. mixed comparison (only if both occur).
# Calling effsize::cohen.d() via `::` keeps the package unattached — the
# original require(effsize) attached it and masked psych::cohen.d, as the
# recorded warning in the rendered output shows.
if("all_male" %in% flow_scores_gender$gender_comp && "mixed" %in% flow_scores_gender$gender_comp) {
  if(requireNamespace("effsize", quietly = TRUE)) {
    cohens_d <- effsize::cohen.d(
      flow_score ~ gender_comp, 
      data = flow_scores_gender %>% filter(gender_comp %in% c("all_male", "mixed"))
    )
    # NOTE(review): print() renders "\n" literally; cat() would honour it.
    print("\nCohen's d (all_male vs. mixed):")
    print(cohens_d)
  }
}
## Warning: Paket 'effsize' wurde unter R Version 4.2.3 erstellt
## 
## Attache Paket: 'effsize'
## Das folgende Objekt ist maskiert 'package:psych':
## 
##     cohen.d
## [1] "\nCohen's d (all_male vs. mixed):"
## 
## Cohen's d
## 
## d estimate: 0.2191825 (small)
## 95 percent confidence interval:
##      lower      upper 
## 0.07198454 0.36638052
# TEIL 7: ZUSAMMENFASSUNG UND INTERPRETATION
# ================================================================================

print("\n=== ZUSAMMENFASSUNG ===")
## [1] "\n=== ZUSAMMENFASSUNG ==="
# Group means per gender composition, ranked from highest to lowest flow.
summary_stats <- flow_scores_gender %>%
  group_by(gender_comp) %>%
  dplyr::summarise(
    mean_flow = round(mean(flow_score, na.rm = TRUE), 3),
    n = n(),
    .groups = "drop"
  ) %>%
  arrange(desc(mean_flow))

print("Ranking der Gender Compositions nach Flow (höchster zuerst):")
## [1] "Ranking der Gender Compositions nach Flow (höchster zuerst):"
print(summary_stats)
## # A tibble: 2 × 3
##   gender_comp mean_flow     n
##   <chr>           <dbl> <int>
## 1 all_male         5.54   268
## 2 mixed            5.32   537
# Translate the ANOVA p-value into a German significance label.
# findInterval() against the conventional cut-offs replaces the original
# if/else-if cascade: index 0 (p < .001) through 3 (p >= .05) maps onto
# the labels vector; boundary handling matches the strict `<` comparisons.
if(!is.na(anova_p)) {
  labels <- c(
    "hochsignifikant (p < 0.001)",
    "sehr signifikant (p < 0.01)",
    "signifikant (p < 0.05)",
    "nicht signifikant (p >= 0.05)"
  )
  significance <- labels[findInterval(anova_p, c(0.001, 0.01, 0.05)) + 1]
  
  print(paste("\nGender Composition Effekt ist", significance))
}
## [1] "\nGender Composition Effekt ist sehr signifikant (p < 0.01)"
# Practical relevance: largest group-mean difference on the flow scale.
if(nrow(summary_stats) >= 2) {
  max_diff <- max(summary_stats$mean_flow) - min(summary_stats$mean_flow)
  print(paste("Maximaler Unterschied zwischen Gruppen:", round(max_diff, 3), "Flow-Punkte"))
}
## [1] "Maximaler Unterschied zwischen Gruppen: 0.224 Flow-Punkte"
print("\n=== GENDER COMPOSITION ANALYSE ABGESCHLOSSEN ===")
## [1] "\n=== GENDER COMPOSITION ANALYSE ABGESCHLOSSEN ==="

Regression of flow on emoji count

library(stringr)

# TEIL 1: EMOJI-EXTRAKTION FUNKTIONEN
# ================================================================================

# Reference list of the text emoticons of interest.
# NOTE(review): this vector is not used by extract_emoji_types() below,
# which hard-codes its own (equivalent) patterns — keep the two in sync
# or drop one. Also note ":/" matches inside URLs ("http://") — confirm
# chat logs cannot contain links.
emoji_patterns <- c(
  # Positive emoticons
  ":\\)", ";\\)", ":D", ";D", "[xX]D", "XD", "<3", ":P", ":p",
  # Negative/neutral emoticons
  ":/", ":\\(", ":\\|", ":o", ":O",
  # Hyphenated ("nose") variants
  ":-\\)", ";-\\)", ":-D", ";-D", ":-/", ":-\\(", ":-\\|", ":-[oO]", ":-[pP]"
)

# Count text emoticons in a single chat-log string.
#
# Returns a list with elements `positive`, `negative`, `neutral` and
# `total` (sum of the three). Missing or empty input yields all zeros.
#
# NOTE(review): the neutral pattern ":/" also matches inside URLs
# ("http://") — confirm chat logs cannot contain links, or tighten it.
extract_emoji_types <- function(text) {
  # Guard against NULL / zero-length input as well as NA and "": the
  # original `is.na(text) || text == ""` errors on zero-length input.
  if(is.null(text) || length(text) == 0 || is.na(text) || text == "") {
    return(list(
      positive = 0,
      negative = 0,
      neutral = 0,
      total = 0
    ))
  }
  
  # Count non-overlapping matches of `pattern` in `text` (base R;
  # perl = TRUE gives leftmost first-alternative matching like stringr).
  count_matches <- function(pattern) {
    hits <- gregexpr(pattern, text, perl = TRUE)[[1]]
    if(hits[1] == -1) 0L else length(hits)
  }
  
  # Positive emoticons (with and without "nose")
  positive_count <- count_matches(
    "(:\\)|;\\)|:D|;D|[xX]D|XD|<3|:P|:p|:-\\)|;-\\)|:-D|;-D|:-[pP])"
  )
  
  # Negative emoticons
  negative_count <- count_matches("(:\\(|:-\\()")
  
  # Neutral emoticons
  neutral_count <- count_matches("(:/|:\\||:o|:O|:-/|:-\\||:-[oO])")
  
  list(
    positive = positive_count,
    negative = negative_count,
    neutral = neutral_count,
    total = positive_count + negative_count + neutral_count
  )
}

# TEIL 2: EMOJI-DATEN EXTRAHIEREN
# ================================================================================

print("--- EXTRAHIERE EMOJI-DATEN ---")
## [1] "--- EXTRAHIERE EMOJI-DATEN ---"
# Find every chat-log column in the wide export (names ending "chat_log").
chat_columns <- names(data)[grepl("chat_log$", names(data))]
print(paste("Gefundene Chat-Log Spalten:", length(chat_columns)))
## [1] "Gefundene Chat-Log Spalten: 9"
# Walk every chat-log column, keep the text-chat conditions, and collect
# one row per non-empty message cell with per-message emoji counts.
all_emoji_data <- data.frame()

for(col in chat_columns) {
  # Classify the column by task (Math / Hidden Profile) and channel.
  task_type <- case_when(
    grepl("mathChat", col) ~ "Math_Chat",
    grepl("mathJitsi", col) ~ "Math_Jitsi", 
    grepl("HiddenProfile_Chat", col) ~ "HP_Chat",
    grepl("HiddenProfile_Jitsi", col) ~ "HP_Jitsi",
    TRUE ~ "Unknown"
  )
  
  # Only text chat carries emoticons — skip the Jitsi conditions.
  if(grepl("Chat", task_type)) {
    # First number embedded in the column name = round index.
    round_num <- as.numeric(str_extract(col, "\\d+"))
    
    col_data <- data %>%
      dplyr::select(participant.code, team_id, chat_log = all_of(col)) %>%
      filter(!is.na(chat_log) & chat_log != "") %>%
      mutate(
        round = round_num,
        task = ifelse(grepl("Math", task_type), "Math", "HP"),
        comm = "Chat",
        chat_length = nchar(chat_log)
      )
    
    # Extract counts once per message, then assign whole columns.
    # (The previous `for(i in 1:nrow(col_data))` loop crashed on
    # zero-row frames because 1:0 still iterates, and grew each column
    # one cell at a time.)
    emoji_counts <- lapply(col_data$chat_log, extract_emoji_types)
    col_data$emoji_positive <- vapply(emoji_counts, function(e) as.integer(e$positive), integer(1))
    col_data$emoji_negative <- vapply(emoji_counts, function(e) as.integer(e$negative), integer(1))
    col_data$emoji_neutral <- vapply(emoji_counts, function(e) as.integer(e$neutral), integer(1))
    col_data$emoji_total <- vapply(emoji_counts, function(e) as.integer(e$total), integer(1))
    
    all_emoji_data <- bind_rows(all_emoji_data, col_data)
  }
}

print(paste("Extrahierte Emoji-Daten:", nrow(all_emoji_data), "Chat-Nachrichten"))
## [1] "Extrahierte Emoji-Daten: 388 Chat-Nachrichten"
# TEIL 3: AGGREGIERE EMOJI-DATEN PRO TEILNEHMER
# ================================================================================

# Aggregate emoji usage per participant and task. The per-group sums are
# computed in summarise(); the derived ratios follow in a separate
# mutate() step, which keeps the aggregation and the arithmetic apart.
emoji_participant_summary <- all_emoji_data %>%
  group_by(participant.code, task) %>%
  dplyr::summarise(
    total_emojis = sum(emoji_total, na.rm = TRUE),
    positive_emojis = sum(emoji_positive, na.rm = TRUE),
    negative_emojis = sum(emoji_negative, na.rm = TRUE),
    neutral_emojis = sum(emoji_neutral, na.rm = TRUE),
    total_chat_length = sum(chat_length, na.rm = TRUE),
    emoji_messages = sum(emoji_total > 0),
    total_messages = n(),
    .groups = "drop"
  ) %>%
  mutate(
    # Emojis per 100 characters of chat; guard against empty chats.
    emoji_density = ifelse(total_chat_length > 0, total_emojis / total_chat_length * 100, 0),
    # Share of a participant's messages containing at least one emoji.
    emoji_message_ratio = emoji_messages / total_messages
  )

print("--- EMOJI-NUTZUNG ÜBERSICHT ---")
## [1] "--- EMOJI-NUTZUNG ÜBERSICHT ---"
print(paste("Teilnehmer mit Emoji-Daten:", nrow(emoji_participant_summary)))
## [1] "Teilnehmer mit Emoji-Daten: 118"
# TEIL 4: VERBINDE MIT FLOW-SCORES
# ================================================================================

# Mean flow per participant and task, restricted to the chat condition
# (emoji data only exist for text chat).
flow_chat_summary <- flow_clean %>%
  filter(comm == "Chat") %>%
  group_by(participant.code, task) %>%
  dplyr::summarise(
    mean_flow_score = mean(flow_score, na.rm = TRUE),
    n_flow_measurements = n(),
    .groups = "drop"
  )

# Join emoji usage with flow scores; keep only participants having both.
emoji_flow_data <- emoji_participant_summary %>%
  left_join(flow_chat_summary, by = c("participant.code", "task")) %>%
  filter(!is.na(mean_flow_score))  # only participants with flow data

print(paste("Teilnehmer mit Emoji- und Flow-Daten:", nrow(emoji_flow_data)))
## [1] "Teilnehmer mit Emoji- und Flow-Daten: 118"
# TEIL 5: DESKRIPTIVE STATISTIKEN
# ================================================================================

print("\n--- DESKRIPTIVE STATISTIKEN ---")
## [1] "\n--- DESKRIPTIVE STATISTIKEN ---"
# Per-task overview of emoji usage: user counts/shares and mean counts
# per emoji valence, plus mean emoji density (emojis per 100 characters).
emoji_overview <- emoji_flow_data %>%
  group_by(task) %>%
  dplyr::summarise(
    n_participants = n(),
    emoji_users = sum(total_emojis > 0),
    emoji_user_percentage = round(emoji_users / n_participants * 100, 1),
    mean_emojis = round(mean(total_emojis), 2),
    median_emojis = median(total_emojis),
    max_emojis = max(total_emojis),
    mean_positive = round(mean(positive_emojis), 2),
    mean_negative = round(mean(negative_emojis), 2),
    mean_neutral = round(mean(neutral_emojis), 2),
    mean_emoji_density = round(mean(emoji_density), 3),
    .groups = "drop"
  )

print("Emoji-Nutzung nach Task:")
## [1] "Emoji-Nutzung nach Task:"
print(emoji_overview)
## # A tibble: 2 × 11
##   task  n_participants emoji_users emoji_user_percentage mean_emojis
##   <chr>          <int>       <int>                 <dbl>       <dbl>
## 1 HP                60          15                  25          0.35
## 2 Math              58          13                  22.4        0.47
## # ℹ 6 more variables: median_emojis <dbl>, max_emojis <int>,
## #   mean_positive <dbl>, mean_negative <dbl>, mean_neutral <dbl>,
## #   mean_emoji_density <dbl>
# Mean flow by emoji-usage category (none / few / moderate / many),
# split by task.
# NOTE(review): `emoji_user` is created here but not used in this
# summary — it is recomputed later for model 4; consider dropping it.
emoji_usage_summary <- emoji_flow_data %>%
  mutate(
    emoji_user = total_emojis > 0,
    emoji_category = case_when(
      total_emojis == 0 ~ "Keine Emojis",
      total_emojis <= 2 ~ "Wenige (1-2)",
      total_emojis <= 5 ~ "Moderate (3-5)",
      TRUE ~ "Viele (6+)"
    )
  ) %>%
  group_by(task, emoji_category) %>%
  dplyr::summarise(
    n = n(),
    mean_flow = round(mean(mean_flow_score, na.rm = TRUE), 3),
    .groups = "drop"
  )

print("\nFlow nach Emoji-Nutzungskategorien:")
## [1] "\nFlow nach Emoji-Nutzungskategorien:"
print(emoji_usage_summary)
## # A tibble: 7 × 4
##   task  emoji_category     n mean_flow
##   <chr> <chr>          <int>     <dbl>
## 1 HP    Keine Emojis      45      5.35
## 2 HP    Moderate (3-5)     2      4.91
## 3 HP    Wenige (1-2)      13      5.36
## 4 Math  Keine Emojis      45      5.37
## 5 Math  Moderate (3-5)     2      5.08
## 6 Math  Viele (6+)         1      4.61
## 7 Math  Wenige (1-2)      10      5.04
# TEIL 6: KORRELATIONEN UND STATISTISCHE TESTS
# ================================================================================

print("\n--- KORRELATIONEN ---")
## [1] "\n--- KORRELATIONEN ---"
# Pearson correlations between emoji measures and mean flow, per task.
for(task_name in c("Math", "HP")) {
  task_data <- emoji_flow_data %>% filter(task == task_name)
  
  # Require a minimal sample before reporting correlations.
  if(nrow(task_data) > 5) {
    cor_total <- cor(task_data$total_emojis, task_data$mean_flow_score, use = "complete.obs")
    cor_positive <- cor(task_data$positive_emojis, task_data$mean_flow_score, use = "complete.obs")
    cor_density <- cor(task_data$emoji_density, task_data$mean_flow_score, use = "complete.obs")
    
    cat(paste("\n", task_name, "Task:\n"))
    cat(paste("  Total Emojis - Flow:", round(cor_total, 3), "\n"))
    cat(paste("  Positive Emojis - Flow:", round(cor_positive, 3), "\n"))
    cat(paste("  Emoji Density - Flow:", round(cor_density, 3), "\n"))
  }
}
## 
##  Math Task:
##   Total Emojis - Flow: -0.178 
##   Positive Emojis - Flow: -0.163 
##   Emoji Density - Flow: -0.043 
## 
##  HP Task:
##   Total Emojis - Flow: -0.093 
##   Positive Emojis - Flow: -0.093 
##   Emoji Density - Flow: -0.025
# TEIL 7: LINEARE MODELLE
# ================================================================================

print("\n--- LINEARE MODELLE ---")
## [1] "\n--- LINEARE MODELLE ---"
# Model 1: mean flow regressed on total emoji count, controlling for task.
model1 <- lm(mean_flow_score ~ total_emojis + task, data = emoji_flow_data)
print("Model 1: Total Emojis -> Flow")
## [1] "Model 1: Total Emojis -> Flow"
print(summary(model1))
## 
## Call:
## lm(formula = mean_flow_score ~ total_emojis + task, data = emoji_flow_data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -2.3266 -0.5952  0.1024  0.7243  1.6179 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   5.38213    0.11865  45.360   <2e-16 ***
## total_emojis -0.12002    0.08481  -1.415    0.160    
## taskMath     -0.03667    0.16415  -0.223    0.824    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8899 on 115 degrees of freedom
## Multiple R-squared:  0.01791,    Adjusted R-squared:  0.0008325 
## F-statistic: 1.049 on 2 and 115 DF,  p-value: 0.3537
# Model 2: flow regressed on the three emoji valence counts plus task.
model2 <- lm(mean_flow_score ~ positive_emojis + negative_emojis + neutral_emojis + task, 
             data = emoji_flow_data)
print("\nModel 2: Emoji Types -> Flow")
## [1] "\nModel 2: Emoji Types -> Flow"
print(summary(model2))
## 
## Call:
## lm(formula = mean_flow_score ~ positive_emojis + negative_emojis + 
##     neutral_emojis + task, data = emoji_flow_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.32613 -0.56209  0.09015  0.69980  1.61831 
## 
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)    
## (Intercept)      5.38169    0.11923  45.136   <2e-16 ***
## positive_emojis -0.11876    0.09205  -1.290    0.200    
## negative_emojis -0.68823    0.53162  -1.295    0.198    
## neutral_emojis   0.49152    0.64995   0.756    0.451    
## taskMath        -0.02840    0.16768  -0.169    0.866    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8892 on 113 degrees of freedom
## Multiple R-squared:  0.03639,    Adjusted R-squared:  0.002282 
## F-statistic: 1.067 on 4 and 113 DF,  p-value: 0.3763
# Model 3: flow regressed on emoji density (per 100 characters) plus task.
model3 <- lm(mean_flow_score ~ emoji_density + task, data = emoji_flow_data)
print("\nModel 3: Emoji Density -> Flow")
## [1] "\nModel 3: Emoji Density -> Flow"
print(summary(model3))
## 
## Call:
## lm(formula = mean_flow_score ~ emoji_density + task, data = emoji_flow_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.28731 -0.62064  0.08819  0.72249  1.65714 
## 
## Coefficients:
##               Estimate Std. Error t value Pr(>|t|)    
## (Intercept)    5.34286    0.11616  45.996   <2e-16 ***
## emoji_density -0.10699    0.34216  -0.313    0.755    
## taskMath      -0.03918    0.16916  -0.232    0.817    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8972 on 115 degrees of freedom
## Multiple R-squared:  0.001661,   Adjusted R-squared:  -0.0157 
## F-statistic: 0.09566 on 2 and 115 DF,  p-value: 0.9088
# Model 4: binary emoji-user indicator (any emoji at all?) plus task.
emoji_flow_data$emoji_user <- emoji_flow_data$total_emojis > 0
model4 <- lm(mean_flow_score ~ emoji_user + task, data = emoji_flow_data)
print("\nModel 4: Emoji User (Yes/No) -> Flow")
## [1] "\nModel 4: Emoji User (Yes/No) -> Flow"
print(summary(model4))
## 
## Call:
## lm(formula = mean_flow_score ~ emoji_user + task, data = emoji_flow_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.33334 -0.60186  0.08333  0.72221  1.61110 
## 
## Coefficients:
##                Estimate Std. Error t value Pr(>|t|)    
## (Intercept)     5.38890    0.12510  43.078   <2e-16 ***
## emoji_userTRUE -0.19511    0.19346  -1.008    0.315    
## taskMath       -0.05558    0.16463  -0.338    0.736    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.8936 on 115 degrees of freedom
## Multiple R-squared:  0.009571,   Adjusted R-squared:  -0.007653 
## F-statistic: 0.5557 on 2 and 115 DF,  p-value: 0.5752
# TEIL 8: TASK-SPEZIFISCHE ANALYSEN
# ================================================================================

print("\n--- TASK-SPEZIFISCHE ANALYSEN ---")
## [1] "\n--- TASK-SPEZIFISCHE ANALYSEN ---"
# Math task only: simple regression of flow on total emoji count,
# run only if the subsample is large enough.
math_data <- emoji_flow_data %>% filter(task == "Math")
if(nrow(math_data) > 10) {
  math_model <- lm(mean_flow_score ~ total_emojis, data = math_data)
  print("Math Task - Emojis -> Flow:")
  print(summary(math_model))
}
## [1] "Math Task - Emojis -> Flow:"
## 
## Call:
## lm(formula = mean_flow_score ~ total_emojis, data = math_data)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -1.9825 -0.3967  0.1194  0.5870  1.4897 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)   5.34358    0.10872  49.149   <2e-16 ***
## total_emojis -0.11598    0.08586  -1.351    0.182    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.77 on 56 degrees of freedom
## Multiple R-squared:  0.03155,    Adjusted R-squared:  0.01426 
## F-statistic: 1.825 on 1 and 56 DF,  p-value: 0.1822
# Hidden Profile task only: same simple regression as for Math,
# run only if the subsample is large enough.
hp_data <- emoji_flow_data %>% filter(task == "HP")
if(nrow(hp_data) > 10) {
  hp_model <- lm(mean_flow_score ~ total_emojis, data = hp_data)
  print("\nHP Task - Emojis -> Flow:")
  print(summary(hp_model))
}
## [1] "\nHP Task - Emojis -> Flow:"
## 
## Call:
## lm(formula = mean_flow_score ~ total_emojis, data = hp_data)
## 
## Residuals:
##      Min       1Q   Median       3Q      Max 
## -2.33041 -0.73782  0.02144  0.77279  1.61404 
## 
## Coefficients:
##              Estimate Std. Error t value Pr(>|t|)    
## (Intercept)    5.3860     0.1440  37.393   <2e-16 ***
## total_emojis  -0.1310     0.1834  -0.714    0.478    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 0.9988 on 58 degrees of freedom
## Multiple R-squared:  0.008714,   Adjusted R-squared:  -0.008377 
## F-statistic: 0.5099 on 1 and 58 DF,  p-value: 0.4781
# PART 9: VISUALIZATIONS
# ================================================================================

# cat() renders the "\n" as a real line break (print() showed it literally).
cat("\n--- VISUALISIERUNGEN ---\n")
# Plot 1: emoji count vs. flow score, one panel per task, with an OLS fit line.
p1 <- emoji_flow_data %>%
  ggplot(aes(x = total_emojis, y = mean_flow_score)) +
  geom_point(size = 2, alpha = 0.6) +
  geom_smooth(method = "lm", se = TRUE, color = "blue") +
  facet_wrap(~ task) +
  theme_minimal() +
  labs(
    title = "Emoji Usage vs. Flow Score",
    x = "Total Emojis Used",
    y = "Mean Flow Score"
  )

print(p1)
## `geom_smooth()` using formula = 'y ~ x'

# Plot 2: flow score by binned emoji usage (none / few / moderate / many),
# boxplots plus jittered raw points, faceted by task.
p2 <- emoji_flow_data %>%
  mutate(emoji_category = case_when(
    total_emojis == 0 ~ "Keine",
    total_emojis <= 2 ~ "Wenige (1-2)",
    total_emojis <= 5 ~ "Moderate (3-5)",
    TRUE ~ "Viele (6+)"
  )) %>%
  ggplot(aes(x = emoji_category, y = mean_flow_score, fill = emoji_category)) +
  geom_boxplot(alpha = 0.7) +
  geom_jitter(alpha = 0.5, width = 0.2) +
  facet_wrap(~ task) +
  labs(
    title = "Flow Score by Emoji Usage Category",
    x = "Emoji Usage Category",
    y = "Mean Flow Score",
    fill = "Category"
  ) +
  theme_minimal() +
  theme(
    axis.text.x = element_text(angle = 45, hjust = 1),
    legend.position = "none"
  )

print(p2)

# Plot 3: distribution of positive / negative / neutral emoji counts,
# restricted to rows where that emoji type was actually used (count > 0).
p3 <- emoji_flow_data %>%
  dplyr::select(participant.code, task, positive_emojis, negative_emojis, neutral_emojis) %>%
  pivot_longer(
    cols = c(positive_emojis, negative_emojis, neutral_emojis),
    names_to = "emoji_type",
    values_to = "count"
  ) %>%
  mutate(emoji_type = str_replace(emoji_type, "_emojis", "")) %>%
  filter(count > 0) %>%
  ggplot(aes(x = emoji_type, y = count, fill = emoji_type)) +
  geom_boxplot(alpha = 0.7) +
  scale_fill_brewer(palette = "Set2") +
  facet_wrap(~ task) +
  theme_minimal() +
  labs(
    title = "Distribution of Emoji Types (Only Users)",
    x = "Emoji Type",
    y = "Count"
  )

print(p3)

# PART 10: EXAMPLES AND SUMMARY
# ================================================================================

# cat() renders the "\n" as a real line break (print() showed it literally).
cat("\n--- TOP EMOJI-NUTZER ---\n")

# Top 3 emoji users per task (the old comment said "Top 5", but both
# slice_max(n = 3) and the printed label use 3; ties can yield extra rows).
top_emoji_users <- emoji_flow_data %>%
  group_by(task) %>%
  slice_max(total_emojis, n = 3) %>%
  ungroup() %>%  # drop grouping so the stored object is a plain tibble
  dplyr::select(participant.code, task, total_emojis, positive_emojis,
         negative_emojis, neutral_emojis, mean_flow_score)

print("Top 3 Emoji-Nutzer pro Task:")
## [1] "Top 3 Emoji-Nutzer pro Task:"
print(as.data.frame(top_emoji_users))
##   participant.code task total_emojis positive_emojis negative_emojis
## 1         bgceom04   HP            3               3               0
## 2         v1jh5b59   HP            3               3               0
## 3         syy4qkr8   HP            2               2               0
## 4         x4q7sojo   HP            2               2               0
## 5         5clw7m9q Math            7               7               0
## 6         cszj8fr6 Math            4               3               1
## 7         vy3vevfc Math            3               2               0
##   neutral_emojis mean_flow_score
## 1              0        3.925926
## 2              0        5.888889
## 3              0        5.037037
## 4              0        4.074074
## 5              0        4.611111
## 6              0        4.805556
## 7              1        5.361111
# Summary statistics
# cat() renders the "\n" as a real line break (print() showed it literally).
cat("\n--- ZUSAMMENFASSUNG ---\n")

# Pearson correlation between emoji count and flow, dropping incomplete rows.
overall_cor <- cor(emoji_flow_data$total_emojis, emoji_flow_data$mean_flow_score, 
                  use = "complete.obs")
# Share of participant-task rows with at least one emoji
# (mean of a logical vector == proportion TRUE).
emoji_users_pct <- mean(emoji_flow_data$total_emojis > 0) * 100

# cat() already concatenates its arguments space-separated, so the extra
# paste() wrapper is redundant; the printed text is unchanged.
cat("Gesamtkorrelation Emojis-Flow:", round(overall_cor, 3), "\n")
## Gesamtkorrelation Emojis-Flow: -0.132
cat("Anteil Emoji-Nutzer:", round(emoji_users_pct, 1), "%\n")
## Anteil Emoji-Nutzer: 23.7 %
cat("Durchschnittlich", round(mean(emoji_flow_data$total_emojis), 1), "Emojis pro Person\n")
## Durchschnittlich 0.4 Emojis pro Person
cat("Maximum:", max(emoji_flow_data$total_emojis), "Emojis von einer Person\n")
## Maximum: 7 Emojis von einer Person
# Significance test: always run cor.test and report the p-value.
# (Previously the test was gated on |r| > 0.1, which silently printed
# nothing at all for small correlations and never showed the p-value.)
cor_test <- cor.test(emoji_flow_data$total_emojis, emoji_flow_data$mean_flow_score)
cat("Korrelationstest: p =", round(cor_test$p.value, 4), "\n")
if (cor_test$p.value < 0.05) {
  cat("Korrelation ist statistisch signifikant (p < 0.05)\n")
} else {
  cat("Korrelation ist nicht statistisch signifikant\n")
}
## Korrelation ist nicht statistisch signifikant

# cat() renders the "\n" as a real line break (print() showed it literally).
cat("\n=== EMOJI-ANALYSE ABGESCHLOSSEN ===\n")